<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JFR</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Form Res</journal-id>
      <journal-title>JMIR Formative Research</journal-title>
      <issn pub-type="epub">2561-326X</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v10i1e88488</article-id>
      <article-id pub-id-type="pmid">42013453</article-id>
      <article-id pub-id-type="doi">10.2196/88488</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Autoencoder-Enhanced Convolutional Neural Networks for Plantar Pressure–Based Gait Pattern Recognition: Model Development and Cross-Validated Evaluation Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Steenstra</surname>
            <given-names>Ivan</given-names>
          </name>
        </contrib>
        <contrib contrib-type="editor">
          <name>
            <surname>Mavragani</surname>
            <given-names>Amaryllis</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Aref</surname>
            <given-names>Mohamed Hisham Fouad</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Singhal</surname>
            <given-names>Abhinav</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author">
          <name name-style="western">
            <surname>Chang</surname>
            <given-names>Chuan-Chun</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0008-2708-0684</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Lung</surname>
            <given-names>Chi-Wen</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-7048-2493</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Jan</surname>
            <given-names>Yih-Kuen</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-7149-4034</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Lu</surname>
            <given-names>Qi-Qian</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0004-3756-2872</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Wang</surname>
            <given-names>Yi-You</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0009-0004-4614-2976</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Chen</surname>
            <given-names>Yi-Sheng</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-2631-406X</ext-link>
        </contrib>
        <contrib id="contrib7" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Liau</surname>
            <given-names>Ben-Yi</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Department of Automatic Control Engineering</institution>
            <institution>Feng Chia University</institution>
            <addr-line>No. 100 Wenhua Road, Xitun District</addr-line>
            <addr-line>Taichung, 407102</addr-line>
            <country>Taiwan</country>
            <phone>886 4 24517250 ext 3915</phone>
            <email>byliau@fcu.edu.tw</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-6857-8656</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Automatic Control Engineering</institution>
        <institution>Feng Chia University</institution>
        <addr-line>Taichung</addr-line>
        <country>Taiwan</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Department of Creative Product Design</institution>
        <institution>Asia University</institution>
        <addr-line>Taichung</addr-line>
        <country>Taiwan</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Rehabilitation Engineering Lab</institution>
        <institution>Department of Health and Kinesiology</institution>
        <institution>University of Illinois Urbana-Champaign</institution>
        <addr-line>Urbana, IL</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Department of Communications Engineering</institution>
        <institution>Feng Chia University</institution>
        <addr-line>Taichung</addr-line>
        <country>Taiwan</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Ben-Yi Liau <email>byliau@fcu.edu.tw</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2026</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>21</day>
        <month>4</month>
        <year>2026</year>
      </pub-date>
      <volume>10</volume>
      <elocation-id>e88488</elocation-id>
      <history>
        <date date-type="received">
          <day>26</day>
          <month>11</month>
          <year>2025</year>
        </date>
        <date date-type="rev-request">
          <day>8</day>
          <month>2</month>
          <year>2026</year>
        </date>
        <date date-type="rev-recd">
          <day>31</day>
          <month>3</month>
          <year>2026</year>
        </date>
        <date date-type="accepted">
          <day>31</day>
          <month>3</month>
          <year>2026</year>
        </date>
      </history>
      <copyright-statement>©Chuan-Chun Chang, Chi-Wen Lung, Yih-Kuen Jan, Qi-Qian Lu, Yi-You Wang, Yi-Sheng Chen, Ben-Yi Liau. Originally published in JMIR Formative Research (https://formative.jmir.org), 21.04.2026.</copyright-statement>
      <copyright-year>2026</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Formative Research, is properly cited. The complete bibliographic information, a link to the original publication on https://formative.jmir.org, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://formative.jmir.org/2026/1/e88488" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Plantar pressure imaging is a stable modality that reflects gait-related biomechanical characteristics and has been used increasingly for gait assessment and recognition. However, plantar pressure images are high dimensional and nonlinear, making manual feature engineering and conventional machine learning insufficient to capture discriminative patterns.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This study aimed to develop a gait pattern recognition model based on plantar pressure using an autoencoder (AE)-enhanced convolutional neural network (CNN) and to evaluate its performance against baseline deep learning and classical machine learning approaches.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>A total of 13 healthy volunteers (aged 18-24 years) were recruited. Plantar pressure data were collected during treadmill walking using an in-shoe pressure measurement system and converted into frame-wise plantar pressure images. We compared a lightweight CNN (Light CNN), an AE-CNN cascade model, and an encoder-augmented CNN with an additional bottleneck layer. Model development used participant-wise data partitioning, and performance was evaluated using accuracy, precision, recall, and <italic>F</italic><sub>1</sub>-score.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The proposed encoder-augmented CNN achieved the best overall performance (<italic>F</italic><sub>1</sub>-score=96.20%), outperforming the Light CNN (<italic>F</italic><sub>1</sub>-score=94.44%) and AE-CNN cascade (<italic>F</italic><sub>1</sub>-score=92.45%). Confusion matrices and learning curves further indicated stable training behavior and consistent classification performance across gait patterns.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>Integrating representation learning (AE-based compression) with CNN-based classification improved the recognition of gait patterns from plantar pressure images. This pilot study included only healthy participants. Future work should validate generalizability in larger and clinically diverse cohorts and further investigate participant-level evaluation and model interpretability, as well as deployment feasibility.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>plantar pressure</kwd>
        <kwd>gait</kwd>
        <kwd>convolutional neural networks</kwd>
        <kwd>autoencoder</kwd>
        <kwd>wearable sensors</kwd>
        <kwd>deep learning</kwd>
        <kwd>machine learning</kwd>
        <kwd>biomedical engineering</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>With the rapid development of smart health care and artificial intelligence, the analysis of physiological signals has become increasingly important in fields such as disease diagnosis, rehabilitation monitoring, and identity verification. Among these signals, plantar pressure imaging is a stable method that reflects the posture of the individual, the characteristics of the gait, and foot functionality. Due to its ease of collection and visualization, plantar pressure imaging has been widely used for behavioral analysis, clinical evaluation, and biometric recognition. Gait is a fundamental human activity that represents the coordinated movement of the lower limbs. Although often overlooked because of its automaticity, gait involves complex and synchronized interactions between the musculoskeletal system and neural control. Understanding gait dynamics is important to assess gait alterations and support rehabilitation planning. Previous studies have investigated gait analysis using deep spatiotemporal learning and machine learning, including pathological gait classification, gait phase detection, pilot gait classification studies, and deep learning–based gait trajectory modeling [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref4">4</xref>]. Typically, the gait cycle can be described by the stance and swing phases, and more detailed phase characterization may provide additional functional information [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref4">4</xref>].</p>
      <p>Traditional gait and plantar pressure analysis relied on manual feature extraction and classical machine learning algorithms. However, plantar pressure data are typically high dimensional and nonlinear, making it difficult for handcrafted features to capture informative patterns consistently. In recent years, deep learning, particularly convolutional neural networks (CNNs), has shown superior performance in automatically extracting features and improving classification accuracy. Autoencoder (AE) techniques have also been used to reduce dimensionality and alleviate redundancy, improving representation compactness and computational efficiency for high-dimensional inputs. Previous research has explored plantar pressure–based assessment in health-related applications. Deschamps et al [<xref ref-type="bibr" rid="ref5">5</xref>] analyzed plantar pressure distribution patterns in people with diabetes, and Amemiya et al [<xref ref-type="bibr" rid="ref6">6</xref>] investigated the relationships between elevated plantar pressure and gait characteristics in patients with diabetes. Wang et al [<xref ref-type="bibr" rid="ref7">7</xref>] developed an insole-based gait monitoring technique to recognize gait patterns associated with knee osteoarthritis. These studies highlight the value of plantar pressure analysis to quantify lower limb loading characteristics and support gait-related assessment. Other researchers have investigated computational models for gait recognition. Nguyen et al [<xref ref-type="bibr" rid="ref8">8</xref>] used smart shoes to classify ambulatory activities and proposed statistical characteristics combined with conventional classifiers, and Jeong et al [<xref ref-type="bibr" rid="ref9">9</xref>] studied the classification of activity with respect to stairs using plantar pressure sensors. 
Jun et al [<xref ref-type="bibr" rid="ref10">10</xref>] further proposed a hybrid deep learning framework that integrates plantar pressure images and 3D skeletal data, indicating that multimodal fusion can improve the recognition of abnormal gait patterns compared to pressure-only inputs. Beyond classification, related gait evaluation studies have compared optimization strategies for sensory data classification using deep neural networks [<xref ref-type="bibr" rid="ref11">11</xref>] and evaluated machine learning algorithms for electromyography pattern classification in gait disorders [<xref ref-type="bibr" rid="ref12">12</xref>], underscoring the broader demand for robust learning pipelines. To address the need for portable, real-time applications, Cho [<xref ref-type="bibr" rid="ref13">13</xref>] developed a deep learning approach using plantar pressure signals to estimate walking speed and gait-related classification tasks. Chhoeum et al [<xref ref-type="bibr" rid="ref14">14</xref>] applied CNN-based regression to estimate knee joint angles using foot pressure mapping images. Ling et al [<xref ref-type="bibr" rid="ref15">15</xref>] introduced an AE-CNN–based multisource data fusion framework to estimate the step length of the gait motion, illustrating the practical value of representation learning when handling high-dimensional gait-related data. Ardhianto et al [<xref ref-type="bibr" rid="ref16">16</xref>] formulated the estimation of the foot progression angle as an object detection task on plantar pressure images using YOLO (You Only Look Once)-based models, demonstrating the feasibility of computer vision–style pipelines on pressure maps. From a system-level perspective, Zhou et al [<xref ref-type="bibr" rid="ref17">17</xref>] developed a gait detection and plantar pressure analysis system using a flexible triboelectric pressure sensor array and deep learning, highlighting the direction of continuous wearable gait monitoring under real-world constraints. 
Collectively, these studies suggest that plantar pressure imaging is advantageous due to its stability, visualization quality, and strong correspondence with gait cycles. Building on these advances, there remains a need for pressure-only deep learning frameworks that can learn discriminative representations from high-dimensional plantar pressure images, reduce redundancy via representation learning (eg, AEs), and maintain computational efficiency. Importantly, clinical claims should be supported by validation in clinically diverse cohorts; therefore, model development and evaluation in controlled settings should be clearly distinguished from future clinical verification.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Study Design and Workflow</title>
        <p>The general workflow of this study is illustrated in <xref rid="figure1" ref-type="fig">Figure 1</xref>. The experimental design consisted of 3 major components. The first component involved the collection of plantar pressure response data required to generate plantar pressure images. The second component focused on the application of machine learning classifiers and a deep learning model based on a CNN. The third component addressed the AE, which was further investigated as a core part of this study. In the final stage of the experiments, the CNN and AE frameworks were integrated to perform classification and performance evaluation.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Overall workflow of the proposed plantar pressure–based recognition system.</p>
          </caption>
          <graphic xlink:href="formative_v10i1e88488_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>As shown in <xref rid="figure1" ref-type="fig">Figure 1</xref>, the experimental workflow highlights the sequential architecture of the study. In particular, the second component, which encompasses both classifiers and CNN-based models, is presented in separate blocks. The detailed structures of these blocks are illustrated in <xref rid="figure2" ref-type="fig">Figures 2</xref> and <xref rid="figure3" ref-type="fig">3</xref>.</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Model development framework that includes the autoencoder (AE)–convolutional neural network (CNN) pipeline and traditional classifiers. AE: autoencoder; AE-CNN: autoencoder convolutional neural network; KNN: k-nearest neighbors; SVM: support vector machine.</p>
          </caption>
          <graphic xlink:href="formative_v10i1e88488_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Architectures of the convolutional neural network (CNN) and encoder-augmented CNN models.</p>
          </caption>
          <graphic xlink:href="formative_v10i1e88488_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>In the classifier block diagram, the AE-CNN cascade is defined as an AE in which compressed data are modularized and sequentially integrated with a CNN. This design enables the model to be trained in a staged manner, thereby forming the AE-CNN cascade. Furthermore, because its data processing pathway resembles that of conventional classifiers, the AE-CNN cascade was grouped within the classifier block for consistency in experimental design.</p>
        <p>In the deep learning block diagram, the encoder-augmented CNN is defined as a hybrid model in which the encoder component of the AE is directly integrated into the CNN classifier. This design enables end-to-end training, allowing the encoder and CNN to fuse into a unified framework. By adopting this hybrid strategy, the model is trained as an integrated architecture, which is referred to as encoder-augmented CNN.</p>
      </sec>
      <sec>
        <title>Participants and Experimental Setup</title>
        <p>A total of 13 healthy university student volunteers (aged 18-24 years) were recruited for this study. Exclusion criteria included current or previous foot ulcers, diabetes, vascular disease, hypertension, inability to walk independently for at least 10 minutes, and continued use of medications that could affect gait. Only participants who met all eligibility criteria were enrolled.</p>
      </sec>
      <sec>
        <title>Ethical Considerations</title>
        <p>This study was approved by the Central Regional Research Ethics Committee of China Medical University, Taichung, Taiwan (approval CRREC-112-130). All participants received a full explanation of the study procedures and provided their written informed consent prior to participation. The data were deidentified for analysis and reporting. No financial compensation was provided to participants in this study.</p>
      </sec>
      <sec>
        <title>Plantar Pressure Measurement</title>
        <p>Plantar pressure data were acquired using the Tekscan F-Scan in-shoe pressure measurement system (<xref rid="figure4" ref-type="fig">Figure 4</xref> [<xref ref-type="bibr" rid="ref18">18</xref>]). The F-Scan provides high-resolution plantar pressure distribution with real-time acquisition via a dedicated data cable, making it suitable for gait analysis, sports science, insole design, and clinical applications. The key functions adopted in this study are summarized in <xref ref-type="table" rid="table1">Table 1</xref>. In particular, the system’s real-time recording capability and high spatial resolution enabled the capture of subtle pressure changes, thereby improving the fidelity of the experimental dataset.</p>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>F-Scan plantar pressure sensor: (A) data collection wearing mode and (B) pressure sensing sheet [<xref ref-type="bibr" rid="ref18">18</xref>].</p>
          </caption>
          <graphic xlink:href="formative_v10i1e88488_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Main functions of the F-Scan plantar pressure sensor.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="220"/>
            <col width="340"/>
            <col width="440"/>
            <thead>
              <tr valign="top">
                <td>Item</td>
                <td>Explanation</td>
                <td>Application</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Plantar pressure measurement</td>
                <td>Unit: kPa</td>
                <td>Measurement of plantar pressure distribution</td>
              </tr>
              <tr valign="top">
                <td>Gait analysis</td>
                <td>Analysis of step length, step frequency, and gait cycle</td>
                <td>Walking, running, and abnormal gait</td>
              </tr>
              <tr valign="top">
                <td>High-resolution sensor</td>
                <td>0.5 cm² per sensing unit; visualization of subtle pressure variations</td>
                <td>Detailed plantar pressure analysis</td>
              </tr>
              <tr valign="top">
                <td>Calibration function</td>
                <td>Three calibration methods: walking, gait, and point modes</td>
                <td>Normal gait analysis, abnormal gait analysis, and plantar pressure distribution while standing</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec>
        <title>Foot Pressure Data Acquisition</title>
        <p>Plantar pressure responses were recorded using the Tekscan F-Scan in-shoe system while participants walked on a treadmill at a fixed speed. Each trial began with a 1-minute familiarization period to stabilize gait at the target speed. The F-Scan microsensors sampled plantar pressure at 25 Hz, and instantaneous pressures were stored as matrix-valued frames and exported in CSV format for downstream processing.</p>
        <p>Frame selection was based on gait cycles, each consisting of a stance phase and a swing phase (<xref rid="figure5" ref-type="fig">Figure 5</xref>). The pressure value at each time point was calculated as the sum of pressure readings across all sensing elements (kPa) as an overall loading indicator. In <xref rid="figure5" ref-type="fig">Figure 5</xref>, the green bracket denotes 10 gait cycles, blue markers indicate frames within the stance phase, and red markers denote the swing phase. As the plantar load is negligible during swing, the sensors yield near-zero readings. Therefore, to enable frame-wise classification of gait patterns, only the stance-phase frames of the 10 cycles were retained for analysis.</p>
        <fig id="figure5" position="float">
          <label>Figure 5</label>
          <caption>
            <p>Example of the plantar pressure response across gait cycles: the green bracket indicates 10 gait cycles, blue markers denote stance-phase frames, and red markers denote swing-phase frames.</p>
          </caption>
          <graphic xlink:href="formative_v10i1e88488_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>The final dataset comprised 6994 frames in 3 gait patterns: slow walking (2590 frames), fast walking (2162 frames), and uphill walking (2242 frames). These frames served as input to the proposed models for training and evaluation.</p>
      </sec>
      <sec>
        <title>Image Preprocessing</title>
        <p>Matrix-form plantar pressure signals exported by the F-Scan system were converted into frame-wise instantaneous plantar pressure matrices using Python 3.10.12 (Python Software Foundation) to enable deep learning. Each frame-wise matrix was normalized to the range (0, 1) using min-max normalization and rendered directly as a grayscale intensity image, in which pixel intensity represents normalized pressure magnitude. For visualization (<xref rid="figure6" ref-type="fig">Figure 6</xref>A), we additionally rendered the same normalized matrices as pseudocolor pressure maps using the perceptually uniform viridis colormap. The viridis colormap was used for visualization only. The grayscale images were resized to 64×64 pixels to ensure consistency across samples, and Gaussian noise was injected as a lightweight augmentation to improve noise robustness and mitigate overfitting. After preprocessing, the resulting input tensor for model training and evaluation had the shape (6994, 64, 64, 1).</p>
        <fig id="figure6" position="float">
          <label>Figure 6</label>
          <caption>
            <p>(A) Example of an image of plantar pressure distribution rendered with a perceptually uniform viridis colormap. (B) The corresponding grayscale image used as the model input after preprocessing.</p>
          </caption>
          <graphic xlink:href="formative_v10i1e88488_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Model Architecture</title>
        <p>In this study, 3 deep learning architectures were developed to address plantar pressure image classification, each designed with distinct structural characteristics. The first was a lightweight CNN (Light CNN), which served as the baseline model. It was constructed with 3 convolutional layers, each followed by max-pooling to progressively reduce dimensionality while preserving salient spatial features. Batch normalization was incorporated to stabilize the training process, while dropout layers were used to mitigate overfitting. A fully connected layer and a final softmax classifier were used to output the probabilities of the 3 gait categories, namely, slow walking, fast walking, and uphill walking. The structural design of this network is summarized in <xref ref-type="table" rid="table2">Table 2</xref>, which establishes the baseline framework for subsequent comparisons.</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Light convolutional neural network model architecture.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="190"/>
            <col width="200"/>
            <col width="380"/>
            <col width="230"/>
            <thead>
              <tr valign="top">
                <td>Tasks</td>
                <td colspan="2">Structural parameters</td>
                <td>Training parameters (activation function)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Layer</td>
                <td>Detailed parameters</td>
                <td>
                  <break/>
                </td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Feature extraction</td>
                <td>Input image</td>
                <td>(64, 64, 1)</td>
                <td>—<sup>a</sup></td>
              </tr>
              <tr valign="top">
                <td>Feature extraction</td>
                <td>Conv2D_1</td>
                <td>32, (3, 3), padding=“same”; L2 regularization (λ=0.0005)</td>
                <td>Leaky ReLU</td>
              </tr>
              <tr valign="top">
                <td>Feature extraction</td>
                <td>MaxPool2D</td>
                <td>(2, 2), padding=“same”</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Feature extraction</td>
                <td>Conv2D_2</td>
                <td>64, (3, 3), padding=“same”; L2 regularization (λ=0.0005)</td>
                <td>Leaky ReLU</td>
              </tr>
              <tr valign="top">
                <td>Feature extraction</td>
                <td>MaxPool2D</td>
                <td>(2, 2), padding=“same”</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Feature extraction</td>
                <td>Conv2D_3</td>
                <td>128, (3, 3), padding=“same”; L2 regularization (λ=0.0005)</td>
                <td>Leaky ReLU</td>
              </tr>
              <tr valign="top">
                <td>Feature extraction</td>
                <td>MaxPool2D</td>
                <td>(2, 2), padding=“same”</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Feature extraction</td>
                <td>Dropout (0.2)</td>
                <td>—</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Multiclass classification</td>
                <td>Dense</td>
                <td>90</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Multiclass classification</td>
                <td>Dense</td>
                <td>60</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Multiclass classification</td>
                <td>Dense</td>
                <td>3</td>
                <td>Softmax</td>
              </tr>
              <tr valign="top">
                <td>Other parameters</td>
                <td>K-fold</td>
                <td>9</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Other parameters</td>
                <td>Batch size</td>
                <td>64</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Other parameters</td>
                <td>Epochs</td>
                <td>100</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Other parameters</td>
                <td>Loss function</td>
                <td>categorical_crossentropy</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Other parameters</td>
                <td>Optimizer</td>
                <td>Adam (learning rate=0.0005)</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Other parameters</td>
                <td>Metrics</td>
                <td>accuracy</td>
                <td>—</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table2fn1">
              <p><sup>a</sup>Not applicable.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p>The second model was the AE-CNN cascade, which modularized the integration of an AE and a CNN classifier. In this design, the encoder of the AE first compressed the high-dimensional plantar pressure matrices into a compact latent representation, while the decoder simultaneously ensured that essential structural information could be reconstructed. The compressed latent features were then transferred to a CNN classifier for further convolutional processing and final classification. This staged cascade design effectively separated feature compression from classification, improving both the stability and the interpretability of the learning process. The encoder architecture used in the AE-CNN cascade is presented in <xref ref-type="table" rid="table3">Table 3</xref>, illustrating how feature reduction and classification were sequentially integrated.</p>
        <table-wrap position="float" id="table3">
          <label>Table 3</label>
          <caption>
            <p>Autoencoder encoder model architecture.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="120"/>
            <col width="150"/>
            <col width="300"/>
            <col width="430"/>
            <thead>
              <tr valign="top">
                <td>Task</td>
                <td colspan="2">Structural parameters</td>
                <td>Training parameters (activation function)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Layer</td>
                <td>Detailed parameters</td>
                <td>
                  <break/>
                </td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Encoded</td>
                <td>Input image</td>
                <td>(64, 64, 1)</td>
                <td>—<sup>a</sup></td>
              </tr>
              <tr valign="top">
                <td>Encoded</td>
                <td>Conv2D_1</td>
                <td>32, (3, 3), padding=“same”</td>
                <td>Leaky ReLU</td>
              </tr>
              <tr valign="top">
                <td>Encoded</td>
                <td>MaxPool2D</td>
                <td>(2, 2), padding=“same”</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Encoded</td>
                <td>Conv2D_2</td>
                <td>64, (3, 3), padding=“same”</td>
                <td>Leaky ReLU</td>
              </tr>
              <tr valign="top">
                <td>Encoded</td>
                <td>MaxPool2D</td>
                <td>(2, 2), padding=“same”</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Encoded</td>
                <td>Conv2D_3</td>
                <td>128, (3, 3), padding=“same”</td>
                <td>Leaky ReLU</td>
              </tr>
              <tr valign="top">
                <td>Encoded</td>
                <td>MaxPool2D</td>
                <td>(2, 2), padding=“same”</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Encoded</td>
                <td>Flatten</td>
                <td>—</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Encoded</td>
                <td>Dense</td>
                <td>128</td>
                <td>—</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table3fn1">
              <p><sup>a</sup>Not applicable.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p>To evaluate generalization while reducing the risk of information leakage from correlated frame-wise samples, data partitioning was performed at the participant level. Participants (n=13) were first split into a development set (n=10, 76.9%) and an independent held-out test set (n=3, 23.1%). Within the development set, a participant-wise grouped 9-fold cross-validation procedure was applied for model selection and stability assessment. In each fold, all frame-wise samples from participants in the training folds were used for model fitting, and all samples from participants in the held-out fold were used for validation. As 10 is not evenly divisible by 9, fold sizes differed by at most 1 participant (8 folds contained 1 participant and 1 fold contained 2 participants). After cross-validation, the final model configuration was retrained on the full development set and evaluated once on the held-out test set. Performance was reported using accuracy, precision, recall, and <italic>F</italic><sub>1</sub>-score.</p>
        <p>The third architecture was the encoder-augmented CNN, which further extended the integration of AE and CNN by embedding the encoder directly into the CNN pipeline. Unlike the cascade structure, this hybrid design adopted an end-to-end framework, where the encoder served as the initial feature extractor and its outputs were directly connected to the subsequent CNN layers. This approach allowed the encoder and CNN to be jointly optimized, combining compact representation learning with the discriminative power of deep convolutional layers. The architectural layout of this model is summarized in <xref ref-type="table" rid="table4">Table 4</xref>, highlighting its streamlined structure and enhanced learning efficiency.</p>
        <table-wrap position="float" id="table4">
          <label>Table 4</label>
          <caption>
            <p>Encoder-augmented convolutional neural network model architecture.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="270"/>
            <col width="210"/>
            <col width="0"/>
            <col width="300"/>
            <col width="0"/>
            <col width="220"/>
            <thead>
              <tr valign="top">
                <td>Task</td>
                <td colspan="4">Structural parameters</td>
                <td>Training parameters (activation function)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Layer</td>
                <td colspan="3">Detailed parameters</td>
                <td>
                  <break/>
                </td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Spatial and feature compression</td>
                <td>Input image</td>
                <td colspan="3">(64, 64, 1)</td>
                <td>—<sup>a</sup></td>
              </tr>
              <tr valign="top">
                <td>Spatial and feature compression</td>
                <td>Conv2D_1</td>
                <td colspan="3">32,(3, 3), padding=“same”; L2 regularization (λ=0.0005)</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Spatial and feature compression</td>
                <td>Batch normalization</td>
                <td colspan="3">—</td>
                <td>Leaky ReLU</td>
              </tr>
              <tr valign="top">
                <td>Spatial and feature compression</td>
                <td>MaxPool2D</td>
                <td colspan="3">(2, 2), padding=“same”</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Spatial and feature compression</td>
                <td>Conv2D_2</td>
                <td colspan="3">64,(3, 3), padding=“same”; L2 regularization (λ=0.0005)</td>
                <td>Leaky ReLU</td>
              </tr>
              <tr valign="top">
                <td>Spatial and feature compression</td>
                <td>MaxPool2D</td>
                <td colspan="3">(2, 2), padding=“same”</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Spatial and feature compression</td>
                <td>Conv2D_3</td>
                <td colspan="3">128,(3, 3), padding=“same”; L2 regularization (λ=0.0005)</td>
                <td>Leaky ReLU</td>
              </tr>
              <tr valign="top">
                <td>Spatial and feature compression</td>
                <td>MaxPool2D</td>
                <td colspan="3">(2, 2), padding=“same”</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Spatial and feature compression</td>
                <td>Dropout (0.2)</td>
                <td colspan="3">—</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Spatial and feature compression</td>
                <td>Flatten</td>
                <td colspan="3">—</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Spatial and feature compression</td>
                <td>Dense (bottleneck)</td>
                <td colspan="3">128</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Multiclass classification</td>
                <td>Dense</td>
                <td colspan="3">90</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Multiclass classification</td>
                <td>Dense</td>
                <td colspan="3">60</td>
                <td>—</td>
              </tr>
              <tr valign="top">
                <td>Multiclass classification</td>
                <td>Dense</td>
                <td colspan="3">3</td>
                <td>Softmax</td>
              </tr>
              <tr valign="top">
                <td>Other parameters</td>
                <td colspan="2">K-fold</td>
                <td>9</td>
                <td colspan="2">—</td>
              </tr>
              <tr valign="top">
                <td>Other parameters</td>
                <td colspan="2">Batch size</td>
                <td>64</td>
                <td colspan="2">—</td>
              </tr>
              <tr valign="top">
                <td>Other parameters</td>
                <td colspan="2">Epochs</td>
                <td>100</td>
                <td colspan="2">—</td>
              </tr>
              <tr valign="top">
                <td>Other parameters</td>
                <td colspan="2">Loss function</td>
                <td>categorical_crossentropy</td>
                <td colspan="2">—</td>
              </tr>
              <tr valign="top">
                <td>Other parameters</td>
                <td colspan="2">Optimizer</td>
                <td>Adam (learning rate=0.0005)</td>
                <td colspan="2">—</td>
              </tr>
              <tr valign="top">
                <td>Other parameters</td>
                <td colspan="2">Metrics</td>
                <td>accuracy</td>
                <td colspan="2">—</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table4fn1">
              <p><sup>a</sup>Not applicable.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Evaluation Metrics</title>
        <p>Model performance was assessed using accuracy, precision, recall (sensitivity), <italic>F</italic><sub>1</sub>-score, and confusion matrix:</p>
        <list list-type="bullet">
          <list-item>
            <p>The accuracy reflects the overall proportion of correctly classified samples in all classes.</p>
          </list-item>
          <list-item>
            <p>Precision quantifies the reliability of positive predictions for a given class by indicating how many predicted positives are correct.</p>
          </list-item>
          <list-item>
            <p>Recall (sensitivity) measures the model’s ability to correctly identify true instances of a given class.</p>
          </list-item>
          <list-item>
            <p>The <italic>F</italic><sub>1</sub>-score provides a balanced summary of precision and recall, particularly useful when both false positives and false negatives matter.</p>
          </list-item>
          <list-item>
            <p>The confusion matrix offers a class-by-class view of predictions vs ground truth, enabling identification of error patterns (eg, which gait classes are most frequently confused). For the 3-class setting in this study (slow walking, fast walking, and uphill walking), a 3×3 matrix was used to summarize the results per class and general trends.</p>
          </list-item>
        </list>
        <p>All metrics were computed on the designated evaluation split without using any evaluation data for parameter updating. Unless stated otherwise, results are reported at the overall level to facilitate comparison among the baseline CNN, the AE-CNN cascade, the encoder-augmented CNN, and traditional classifiers; confusion matrices are additionally provided to illustrate class-wise error patterns. To reduce optimistic bias due to correlated frame-wise samples, all data splitting was performed at the participant level. Specifically, all stance-phase frames from the same participant were assigned to a single fold during cross-validation, ensuring that no participant’s frames appeared in both the training and validation sets within any iteration. Model performance was summarized using accuracy, precision, recall, and <italic>F</italic><sub>1</sub>-score.</p>
      </sec>
      <sec>
        <title>Reporting Guidelines</title>
        <p>This model development and evaluation study was reported with reference to the CREMLS (Consolidated Reporting of Machine Learning Studies) checklist. The completed checklist is provided in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Overview</title>
        <p>This section summarizes the main comparative findings from the deep learning and classical machine learning models evaluated. Detailed training curves, optimization experiments, and hyperparameter tuning results are provided in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>.</p>
        <p>Among the baseline deep learning models, the Light CNN achieved an <italic>F</italic><sub>1</sub>-score of 94.44% on the held-out test set, whereas the AE-CNN cascade achieved an <italic>F</italic><sub>1</sub>-score of 92.45%.</p>
        <p>Additional optimization analyses of the encoder-augmented CNN, including comparisons of downsampling strategies, batch normalization configurations, and bottleneck layer inclusion, supported the final model design reported here (<xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>).</p>
        <p>For the classical machine learning models trained on AE-derived features, support vector machine (SVM) with a radial basis function kernel performed best (<italic>F</italic><sub>1</sub>-score=93.76%), followed by k-nearest neighbors (<italic>F</italic><sub>1</sub>-score=91.73%) and random forest (<italic>F</italic><sub>1</sub>-score=88.54%).</p>
      </sec>
      <sec>
        <title>Principal Findings</title>
        <p>The primary finding of this study is that the proposed encoder-augmented CNN architecture achieves superior performance (<italic>F</italic><sub>1</sub>-score=96.20%) in classifying dynamic gait patterns from plantar pressure images compared to both baseline deep learning models and classical machine learning classifiers (<xref rid="figure7" ref-type="fig">Figure 7</xref>; <xref ref-type="table" rid="table5">Table 5</xref>). This result supported the hypothesis that an integrated deep learning architecture, combining the feature extraction and dimensionality reduction capabilities of an AE with the classification power of a CNN, is highly effective for this task.</p>
        <fig id="figure7" position="float">
          <label>Figure 7</label>
          <caption>
            <p>Overall performance comparison of all models. AE-CNN: autoencoder convolutional neural network; KNN: k-nearest neighbors; SVM-RBF: support vector machine-radial basis function.</p>
          </caption>
          <graphic xlink:href="formative_v10i1e88488_fig7.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <table-wrap position="float" id="table5">
          <label>Table 5</label>
          <caption>
            <p>Overall performance in the held-out test set.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="400"/>
            <col width="170"/>
            <col width="160"/>
            <col width="120"/>
            <col width="150"/>
            <thead>
              <tr valign="top">
                <td>Model</td>
                <td>Accuracy</td>
                <td>Precision</td>
                <td>Recall</td>
                <td><italic>F</italic><sub>1</sub>-score</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>KNN<sup>a</sup></td>
                <td>91.76</td>
                <td>91.77</td>
                <td>91.76</td>
                <td>91.73</td>
              </tr>
              <tr valign="top">
                <td>SVM<sup>b</sup>-RBF<sup>c</sup></td>
                <td>93.76</td>
                <td>93.84</td>
                <td>93.76</td>
                <td>93.76</td>
              </tr>
              <tr valign="top">
                <td>Random forest</td>
                <td>88.58</td>
                <td>88.61</td>
                <td>88.58</td>
                <td>88.54</td>
              </tr>
              <tr valign="top">
                <td>AE<sup>d</sup>-CNN<sup>e</sup> cascade</td>
                <td>92.42</td>
                <td>92.50</td>
                <td>92.42</td>
                <td>92.45</td>
              </tr>
              <tr valign="top">
                <td>Light CNN</td>
                <td>94.42</td>
                <td>94.47</td>
                <td>94.42</td>
                <td>94.44</td>
              </tr>
              <tr valign="top">
                <td>Encoder-augmented CNN</td>
                <td>96.21</td>
                <td>96.21</td>
                <td>96.21</td>
                <td>96.20</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table5fn1">
              <p><sup>a</sup>KNN: k-nearest neighbors.</p>
            </fn>
            <fn id="table5fn2">
              <p><sup>b</sup>SVM: support vector machine.</p>
            </fn>
            <fn id="table5fn3">
              <p><sup>c</sup>RBF: radial basis function.</p>
            </fn>
            <fn id="table5fn4">
              <p><sup>d</sup>AE: autoencoder.</p>
            </fn>
            <fn id="table5fn5">
              <p><sup>e</sup>CNN: convolutional neural network.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Overview</title>
        <p>This model development and evaluation study demonstrates that the integration of AE-based representation learning with CNN architectures can achieve accurate recognition of gait patterns based on plantar pressure in a pilot dataset of healthy participants. Future work should validate generalization across broader demographics and clinical populations, evaluate robustness to real-world variability (eg, footwear, sensor noise, and speed fluctuations), and further strengthen interpretability and deployment feasibility for wearable or embedded applications. The performance of the Light CNN (<italic>F</italic><sub>1</sub>-score=94.44%) also demonstrated that even a lightweight, stand-alone CNN can effectively learn discriminative features from plantar pressure data. Among the classical methods, the support vector machine–RBF classifier proved to be the most robust, outperforming k-nearest neighbors and random forest, which aligns with its known strengths in handling high-dimensional feature spaces.</p>
        <p>The study also highlights the importance of architectural choices in model optimization. Supplementary analyses of downsampling strategy, batch normalization placement, and bottleneck layer inclusion are provided in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>. These experiments supported the final selection of the encoder-augmented CNN configuration reported in the main manuscript.</p>
      </sec>
      <sec>
        <title>Interpretation of Results and Gait Feature Analysis</title>
        <p>A notable and consistent finding in multiple models was the confusion between “fast walking” and “uphill walking” gaits (<xref rid="figure8" ref-type="fig">Figure 8</xref>). The encoder-augmented CNN, for example, misclassified 23 “uphill” instances as “fast walking.” This suggests that at similar walking speeds, the plantar pressure distributions for these 2 activities share substantial similarities. The primary differentiator may lie in subtle temporal features or pressure shifts related to gravitational resistance during uphill walking, which current spatial feature–focused models may not fully capture. In contrast, “slow walking” was classified with very high precision, indicating that variations in walking speed produce more distinct plantar pressure patterns than variations in surface incline.</p>
        <fig id="figure8" position="float">
          <label>Figure 8</label>
          <caption>
            <p>Confusion matrix for the encoder-augmented convolutional neural network (CNN).</p>
          </caption>
          <graphic xlink:href="formative_v10i1e88488_fig8.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Comparison With Prior Work</title>
        <p>The use of deep learning, particularly CNNs, for plantar pressure analysis is consistent with recent trends in biomechanics and clinical research. Although many studies have successfully used CNNs for static pressure images (eg, for disease diagnosis), this research extends their application to dynamic gait classification. The accuracy achieved by the encoder-augmented CNN (96.21%) is competitive with or exceeds that reported in other studies using different sensor modalities or classification algorithms for similar tasks. The finding that an integrated AE-CNN architecture outperforms a standard CNN suggests that explicit feature learning and dimensionality reduction prior to classification can be a beneficial strategy for complex, high-variance data such as plantar pressure sequences. Clinical and translational implications should be interpreted with caution. This pilot study included only healthy young adults under controlled treadmill conditions and did not include participants with pathological gaits (eg, diabetes-related gait alteration). Therefore, while plantar pressure imaging is clinically relevant and the proposed framework shows technical promise, clinical screening performance has not been empirically validated here and requires future studies in clinical cohorts and real-world environments. In addition, our current approach performs frame-wise classification of stance-phase plantar pressure maps and does not explicitly model temporal dynamics across gait cycles; future work could incorporate sequence models (eg, temporal CNNs or long short-term memory) to better capture temporal gait signatures.</p>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>Although this study has yielded promising results, several limitations should be considered:</p>
        <p>1. Single data source—plantar pressure data in this study were collected from a limited number of participants in a controlled laboratory environment. Whether the model’s generalization ability can be extended to populations with different ages, genders, weights, or specific pathological gaits (eg, flat feet and diabetic foot) requires further validation.</p>
        <p>2. Model interpretability—compared to classical machine learning models, deep learning models are often regarded as “black boxes,” with less transparent decision-making processes. Although this study validated the effectiveness of the model, it did not explore which specific regions or features of plantar pressure images the model used for classification.</p>
        <p>3. Limited gait types—the study only covered 3 specific dynamic gaits. The applicability of the model to more complex daily activities, such as walking downhill, turning, or climbing stairs, has yet to be determined.</p>
        <p>In addition, the evaluation in this study was reported primarily at the frame level. Although frame-wise metrics are useful for model comparison, participant-level performance (eg, aggregating frame-level predictions to participant-level decisions) should be reported in future studies to better reflect real-world use. Moreover, statistical uncertainty (eg, CIs via bootstrapping) and systematic robustness tests under controlled perturbations (eg, varying noise intensity or sensor shift) were not performed in this study and remain important directions for future work.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>This study developed and evaluated a deep learning architecture named encoder-augmented CNN for gait classification using plantar pressure images. The model combines the feature extraction capabilities of an AE with the classification strengths of a CNN. Through systematic structural optimization, it ultimately achieved an accuracy of 96.21% in the 3-class dynamic-gait recognition task, outperforming classical machine learning methods and other deep learning variants in this study.</p>
        <p>On the basis of the findings and limitations of this research, future studies could proceed in the following directions:</p>
        <list list-type="bullet">
          <list-item>
            <p>Database expansion and model generalization—recruit a more diverse range of participants and collect data in settings closer to real-life scenarios to validate and enhance the model’s generalization ability. Future clinical studies could explore whether this framework can assist in the assessment of pathological gaits.</p>
          </list-item>
          <list-item>
            <p>Enhancing the interpretability of the model—introduce visualization techniques (eg, gradient-weighted class activation mapping) to analyze the plantar pressure heat maps that the model focuses on during classification decisions. This would improve understanding of the basis for the model’s decisions and could potentially lead to the discovery of new biomechanical indicators.</p>
          </list-item>
          <list-item>
            <p>Multimodal data fusion—to address the confusion between fast walking and uphill walking, future work could attempt to fuse data from other sensors (such as gyroscopes and accelerometers) to build a multimodal gait recognition system, with the aim of achieving higher classification accuracy.</p>
          </list-item>
          <list-item>
            <p>Model lightweighting and real-time application—explore techniques such as model pruning or knowledge distillation to further reduce the computational complexity of the encoder-augmented CNN. This would enable its deployment on wearable devices or embedded systems for real-time gait monitoring and feedback.</p>
          </list-item>
        </list>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>CREMLS_checklist.</p>
        <media xlink:href="formative_v10i1e88488_app1.docx" xlink:title="DOCX File , 59 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>Supporting optimization analyses, training curves, and hyperparameter tuning results.</p>
        <media xlink:href="formative_v10i1e88488_app2.docx" xlink:title="DOCX File , 554 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AE</term>
          <def>
            <p>autoencoder</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">CNN</term>
          <def>
            <p>convolutional neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">CREMLS</term>
          <def>
            <p>Consolidated Reporting of Machine Learning Studies</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>No generative artificial intelligence tools were used in the writing or editing of this manuscript.</p>
    </ack>
    <notes>
      <title>Data Availability</title>
      <p>The datasets generated or analyzed during this study are not publicly available due to privacy and ethical restrictions but are available from the corresponding author on reasonable request. Access to deidentified data may be subject to institutional approval and a data use agreement, where applicable. Requests for data access may be directed to the corresponding author at byliau@fcu.edu.tw.</p>
    </notes>
    <notes>
      <title>Funding</title>
      <p>This study received no specific grant from any funding agency in the public, commercial, or not-for-profit sectors.</p>
    </notes>
    <fn-group>
      <fn fn-type="con">
        <p>Conceptualization: CCC, BYL</p>
        <p>Formal analysis: CCC</p>
        <p>Investigation: CCC</p>
        <p>Methodology: CCC, BYL, CWL, YKJ</p>
        <p>Resources: CCC, BYL</p>
        <p>Writing – original draft: CWL, YKJ, BYL</p>
        <p>Writing – review &#38; editing: CWL, YKJ, QQL, YYW, YSC, BYL</p>
        <p>All authors have read and agreed to the published version of the manuscript.</p>
      </fn>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Albuquerque</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Verlekar</surname>
              <given-names>TT</given-names>
            </name>
            <name name-style="western">
              <surname>Correia</surname>
              <given-names>PL</given-names>
            </name>
            <name name-style="western">
              <surname>Soares</surname>
              <given-names>LD</given-names>
            </name>
          </person-group>
          <article-title>A spatiotemporal deep learning approach for automatic pathological gait classification</article-title>
          <source>Sensors (Basel)</source>
          <year>2021</year>
          <month>09</month>
          <day>16</day>
          <volume>21</volume>
          <issue>18</issue>
          <fpage>6202</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s21186202"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s21186202</pub-id>
          <pub-id pub-id-type="medline">34577408</pub-id>
          <pub-id pub-id-type="pii">s21186202</pub-id>
          <pub-id pub-id-type="pmcid">PMC8473368</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bauman</surname>
              <given-names>VV</given-names>
            </name>
            <name name-style="western">
              <surname>Brandon</surname>
              <given-names>SC</given-names>
            </name>
          </person-group>
          <article-title>Gait phase detection in walking and stairs using machine learning</article-title>
          <source>J Biomech Eng</source>
          <year>2022</year>
          <month>12</month>
          <day>01</day>
          <volume>144</volume>
          <issue>12</issue>
          <fpage>121007</fpage>
          <pub-id pub-id-type="doi">10.1115/1.4055504</pub-id>
          <pub-id pub-id-type="medline">36062965</pub-id>
          <pub-id pub-id-type="pii">1146023</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Krutaraniyom</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sengchuai</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Booranawong</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Jaruenpunyasak</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Pilot study on gait classification using machine learning</article-title>
          <source>Proceedings of the 2022 International Electrical Engineering Congress</source>
          <year>2022</year>
          <conf-name>iEECON</conf-name>
          <conf-date>March 9-11, 2022</conf-date>
          <conf-loc>Khon Kaen, Thailand</conf-loc>
          <pub-id pub-id-type="doi">10.1109/ieecon53204.2022.9741586</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Semwal</surname>
              <given-names>VB</given-names>
            </name>
            <name name-style="western">
              <surname>Jain</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Maheshwari</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Khatwani</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Gait reference trajectory generation at different walking speeds using LSTM and CNN</article-title>
          <source>Multimed Tools Appl</source>
          <year>2023</year>
          <month>03</month>
          <day>13</day>
          <volume>82</volume>
          <fpage>33401</fpage>
          <lpage>19</lpage>
          <pub-id pub-id-type="doi">10.1007/s11042-023-14733-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Deschamps</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Matricali</surname>
              <given-names>GA</given-names>
            </name>
            <name name-style="western">
              <surname>Roosen</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Desloovere</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Bruyninckx</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Spaepen</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Nobels</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Tits</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Flour</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Staes</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Classification of forefoot plantar pressure distribution in persons with diabetes: a novel perspective for the mechanical management of diabetic foot?</article-title>
          <source>PLoS One</source>
          <year>2013</year>
          <month>11</month>
          <day>22</day>
          <volume>8</volume>
          <issue>11</issue>
          <fpage>e79924</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0079924"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0079924</pub-id>
          <pub-id pub-id-type="medline">24278219</pub-id>
          <pub-id pub-id-type="pii">PONE-D-13-06701</pub-id>
          <pub-id pub-id-type="pmcid">PMC3838415</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Amemiya</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Noguchi</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Oe</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Takehara</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Yamada</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ohashi</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ueki</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Kadowaki</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Mori</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Sanada</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Relationship between elevated plantar pressure of toes and forefoot and gait features in diabetic patients</article-title>
          <source>Annu Int Conf IEEE Eng Med Biol Soc</source>
          <year>2013</year>
          <volume>2013</volume>
          <fpage>4633</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1109/EMBC.2013.6610580</pub-id>
          <pub-id pub-id-type="medline">24110767</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Fan</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Yuan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Fu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Zang</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Piezoresistive-based gait monitoring technique for the recognition of knee osteoarthritis patients</article-title>
          <source>IEEE Access</source>
          <year>2022</year>
          <month>11</month>
          <day>21</day>
          <volume>10</volume>
          <fpage>123874</fpage>
          <lpage>84</lpage>
          <pub-id pub-id-type="doi">10.1109/access.2022.3224047</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>ND</given-names>
            </name>
            <name name-style="western">
              <surname>Bui</surname>
              <given-names>DT</given-names>
            </name>
            <name name-style="western">
              <surname>Truong</surname>
              <given-names>PH</given-names>
            </name>
            <name name-style="western">
              <surname>Jeong</surname>
              <given-names>GM</given-names>
            </name>
          </person-group>
          <article-title>Classification of five ambulatory activities regarding stair and incline walking using smart shoes</article-title>
          <source>IEEE Sens J</source>
          <year>2018</year>
          <month>5</month>
          <day>17</day>
          <volume>18</volume>
          <issue>13</issue>
          <fpage>5422</fpage>
          <lpage>8</lpage>
          <pub-id pub-id-type="doi">10.1109/jsen.2018.2837674</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jeong</surname>
              <given-names>GM</given-names>
            </name>
            <name name-style="western">
              <surname>Truong</surname>
              <given-names>PH</given-names>
            </name>
            <name name-style="western">
              <surname>Choi</surname>
              <given-names>SI</given-names>
            </name>
          </person-group>
          <article-title>Classification of three types of walking activities regarding stairs using plantar pressure sensors</article-title>
          <source>IEEE Sens J</source>
          <year>2017</year>
          <month>03</month>
          <day>15</day>
          <volume>17</volume>
          <issue>9</issue>
          <fpage>2638</fpage>
          <lpage>9</lpage>
          <pub-id pub-id-type="doi">10.1109/JSEN.2017.2682322</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jun</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>DW</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>MS</given-names>
            </name>
          </person-group>
          <article-title>Deep learning-based multimodal abnormal gait classification using a 3D skeleton and plantar foot pressure</article-title>
          <source>IEEE Access</source>
          <year>2021</year>
          <month>11</month>
          <day>30</day>
          <volume>9</volume>
          <fpage>161576</fpage>
          <lpage>89</lpage>
          <pub-id pub-id-type="doi">10.1109/access.2021.3131613</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Johri</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Pratap</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Narayan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Dwivedy</surname>
              <given-names>SK</given-names>
            </name>
          </person-group>
          <article-title>Sensory data classification for gait assessment using deep neural networks: a comparative study with SGD and Adam optimizer</article-title>
          <source>Proceedings of the 2024 IEEE International Conference on Interdisciplinary Approaches in Technology and Management for Social Innovation</source>
          <year>2024</year>
          <conf-name>IATMSI</conf-name>
          <conf-date>March 14-16, 2024</conf-date>
          <conf-loc>Gwalior, India</conf-loc>
          <pub-id pub-id-type="doi">10.1109/iatmsi60426.2024.10503416</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fricke</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Alizadeh</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Zakhary</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Woost</surname>
              <given-names>TB</given-names>
            </name>
            <name name-style="western">
              <surname>Bogdan</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Classen</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Evaluation of three machine learning algorithms for the automatic classification of EMG patterns in gait disorders</article-title>
          <source>Front Neurol</source>
          <year>2021</year>
          <month>5</month>
          <day>21</day>
          <volume>12</volume>
          <fpage>666458</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/34093413"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fneur.2021.666458</pub-id>
          <pub-id pub-id-type="medline">34093413</pub-id>
          <pub-id pub-id-type="pmcid">PMC8175858</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cho</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Walking speed estimation and gait classification using plantar pressure and on-device deep learning</article-title>
          <source>IEEE Sens J</source>
          <year>2023</year>
          <month>10</month>
          <day>1</day>
          <volume>23</volume>
          <issue>19</issue>
          <fpage>23336</fpage>
          <lpage>47</lpage>
          <pub-id pub-id-type="doi">10.1109/jsen.2023.3305024</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chhoeum</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Min</surname>
              <given-names>SD</given-names>
            </name>
          </person-group>
          <article-title>A convolution neural network approach to access knee joint angle using foot pressure mapping images: a preliminary investigation</article-title>
          <source>IEEE Sens J</source>
          <year>2021</year>
          <month>8</month>
          <day>1</day>
          <volume>21</volume>
          <issue>15</issue>
          <fpage>16937</fpage>
          <lpage>44</lpage>
          <pub-id pub-id-type="doi">10.1109/jsen.2021.3079516</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ling</surname>
              <given-names>ZQ</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>YP</given-names>
            </name>
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>GZ</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>LL</given-names>
            </name>
            <name name-style="western">
              <surname>Tan</surname>
              <given-names>DP</given-names>
            </name>
          </person-group>
          <article-title>AE-CNN-based multisource data fusion for gait motion step length estimation</article-title>
          <source>IEEE Sens J</source>
          <year>2022</year>
          <month>11</month>
          <day>1</day>
          <volume>22</volume>
          <issue>21</issue>
          <fpage>20805</fpage>
          <lpage>15</lpage>
          <pub-id pub-id-type="doi">10.1109/jsen.2022.3206883</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ardhianto</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Subiakto</surname>
              <given-names>RB</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>CY</given-names>
            </name>
            <name name-style="western">
              <surname>Jan</surname>
              <given-names>YK</given-names>
            </name>
            <name name-style="western">
              <surname>Liau</surname>
              <given-names>BY</given-names>
            </name>
            <name name-style="western">
              <surname>Tsai</surname>
              <given-names>JY</given-names>
            </name>
            <name name-style="western">
              <surname>Akbari</surname>
              <given-names>VB</given-names>
            </name>
            <name name-style="western">
              <surname>Lung</surname>
              <given-names>CW</given-names>
            </name>
          </person-group>
          <article-title>A deep learning method for foot progression angle detection in plantar pressure images</article-title>
          <source>Sensors (Basel)</source>
          <year>2022</year>
          <month>04</month>
          <day>05</day>
          <volume>22</volume>
          <issue>7</issue>
          <fpage>2786</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s22072786"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s22072786</pub-id>
          <pub-id pub-id-type="medline">35408399</pub-id>
          <pub-id pub-id-type="pii">s22072786</pub-id>
          <pub-id pub-id-type="pmcid">PMC9003219</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Gui</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Gu</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Ren</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Du</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Cheng</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>A plantar pressure detection and gait analysis system based on flexible triboelectric pressure sensor array and deep learning</article-title>
          <source>Small</source>
          <year>2025</year>
          <month>01</month>
          <volume>21</volume>
          <issue>1</issue>
          <fpage>e2405064</fpage>
          <pub-id pub-id-type="doi">10.1002/smll.202405064</pub-id>
          <pub-id pub-id-type="medline">39473332</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="web">
          <article-title>F-Scan GO system</article-title>
          <source>Tekscan</source>
          <access-date>2026-02-14</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.tekscan.com/products-solutions/systems/f-scan-system">https://www.tekscan.com/products-solutions/systems/f-scan-system</ext-link>
          </comment>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
