<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JFR</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Form Res</journal-id>
      <journal-title>JMIR Formative Research</journal-title>
      <issn pub-type="epub">2561-326X</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v9i1e67969</article-id>
      <article-id pub-id-type="pmid">39869898</article-id>
      <article-id pub-id-type="doi">10.2196/67969</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Multimodal Pain Recognition in Postoperative Patients: Machine Learning Approach</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Mavragani</surname>
            <given-names>Amaryllis</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Chennekkattu Markose</surname>
            <given-names>Ginoop</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Gao</surname>
            <given-names>Xiang</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Subramanian</surname>
            <given-names>Ajan</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff01" ref-type="aff">1</xref>
          <address>
            <institution>Department of Computer Science</institution>
            <institution>University of California, Irvine</institution>
            <addr-line>3211 Donald Bren Hall</addr-line>
            <addr-line>Irvine, CA, 92617</addr-line>
            <country>United States</country>
            <phone>1 6506604994</phone>
            <email>ajans1@uci.edu</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-3253-1300</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Cao</surname>
            <given-names>Rui</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff02" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-9295-8299</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Naeini</surname>
            <given-names>Emad Kasaeyan</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff01" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-7438-2641</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Aqajari</surname>
            <given-names>Seyed Amir Hossein</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff02" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-1747-6980</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Hughes</surname>
            <given-names>Thomas D</given-names>
          </name>
          <degrees>RN</degrees>
          <xref rid="aff03" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0651-9394</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Calderon</surname>
            <given-names>Michael-David</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff04" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-6824-6945</ext-link>
        </contrib>
        <contrib id="contrib7" contrib-type="author">
          <name name-style="western">
            <surname>Zheng</surname>
            <given-names>Kai</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff05" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-4121-4948</ext-link>
        </contrib>
        <contrib id="contrib8" contrib-type="author">
          <name name-style="western">
            <surname>Dutt</surname>
            <given-names>Nikil</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff01" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-3060-8119</ext-link>
        </contrib>
        <contrib id="contrib9" contrib-type="author">
          <name name-style="western">
            <surname>Liljeberg</surname>
            <given-names>Pasi</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff06" ref-type="aff">6</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-9392-3589</ext-link>
        </contrib>
        <contrib id="contrib10" contrib-type="author">
          <name name-style="western">
            <surname>Salanterä</surname>
            <given-names>Sanna</given-names>
          </name>
          <degrees>RN, PhD</degrees>
          <xref rid="aff07" ref-type="aff">7</xref>
          <xref rid="aff08" ref-type="aff">8</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-2529-6699</ext-link>
        </contrib>
        <contrib id="contrib11" contrib-type="author">
          <name name-style="western">
            <surname>Nelson</surname>
            <given-names>Ariana M</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff09" ref-type="aff">9</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-1575-1635</ext-link>
        </contrib>
        <contrib id="contrib12" contrib-type="author">
          <name name-style="western">
            <surname>Rahmani</surname>
            <given-names>Amir M</given-names>
          </name>
          <degrees>MBA, PhD</degrees>
          <xref rid="aff01" ref-type="aff">1</xref>
          <xref rid="aff02" ref-type="aff">2</xref>
          <xref rid="aff03" ref-type="aff">3</xref>
          <xref rid="aff10" ref-type="aff">10</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-0725-1155</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff01">
        <label>1</label>
        <institution>Department of Computer Science</institution>
        <institution>University of California, Irvine</institution>
        <addr-line>Irvine, CA</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff02">
        <label>2</label>
        <institution>Department of Electrical Engineering and Computer Science</institution>
        <institution>University of California, Irvine</institution>
        <addr-line>Irvine, CA</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff03">
        <label>3</label>
        <institution>School of Nursing</institution>
        <institution>University of California, Irvine</institution>
        <addr-line>Irvine, CA</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff04">
        <label>4</label>
        <institution>College of Medicine</institution>
        <institution>Kansas City University</institution>
        <addr-line>Kansas City, MO</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff05">
        <label>5</label>
        <institution>Department of Informatics</institution>
        <institution>University of California, Irvine</institution>
        <addr-line>Irvine, CA</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff06">
        <label>6</label>
        <institution>Department of Computing</institution>
        <institution>University of Turku</institution>
        <addr-line>Turku</addr-line>
        <country>Finland</country>
      </aff>
      <aff id="aff07">
        <label>7</label>
        <institution>Department of Nursing Science</institution>
        <institution>University of Turku</institution>
        <addr-line>Turku</addr-line>
        <country>Finland</country>
      </aff>
      <aff id="aff08">
        <label>8</label>
        <institution>Turku University Hospital</institution>
        <institution>University of Turku</institution>
        <addr-line>Turku</addr-line>
        <country>Finland</country>
      </aff>
      <aff id="aff09">
        <label>9</label>
        <institution>Department of Anesthesiology and Pain Medicine</institution>
        <institution>University of California, Irvine</institution>
        <addr-line>Irvine, CA</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff10">
        <label>10</label>
        <institution>Institute for Future Health</institution>
        <institution>University of California, Irvine</institution>
        <addr-line>Irvine, CA</addr-line>
        <country>United States</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Ajan Subramanian <email>ajans1@uci.edu</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2025</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>27</day>
        <month>1</month>
        <year>2025</year>
      </pub-date>
      <volume>9</volume>
      <elocation-id>e67969</elocation-id>
      <history>
        <date date-type="received">
          <day>24</day>
          <month>10</month>
          <year>2024</year>
        </date>
        <date date-type="rev-request">
          <day>25</day>
          <month>11</month>
          <year>2024</year>
        </date>
        <date date-type="rev-recd">
          <day>28</day>
          <month>11</month>
          <year>2024</year>
        </date>
        <date date-type="accepted">
          <day>29</day>
          <month>11</month>
          <year>2024</year>
        </date>
      </history>
      <copyright-statement>©Ajan Subramanian, Rui Cao, Emad Kasaeyan Naeini, Seyed Amir Hossein Aqajari, Thomas D Hughes, Michael-David Calderon, Kai Zheng, Nikil Dutt, Pasi Liljeberg, Sanna Salanterä, Ariana M Nelson, Amir M Rahmani. Originally published in JMIR Formative Research (https://formative.jmir.org), 27.01.2025.</copyright-statement>
      <copyright-year>2025</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Formative Research, is properly cited. The complete bibliographic information, a link to the original publication on https://formative.jmir.org, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://formative.jmir.org/2025/1/e67969" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Acute pain management is critical in postoperative care, especially in vulnerable patient populations that may be unable to self-report pain levels effectively. Current methods of pain assessment often rely on subjective patient reports or behavioral pain observation tools, which can lead to inconsistencies in pain management. Multimodal pain assessment, integrating physiological and behavioral data, presents an opportunity to create more objective and accurate pain measurement systems. However, most previous work has focused on healthy subjects in controlled environments, with limited attention to real-world postoperative pain scenarios. This gap necessitates the development of robust, multimodal approaches capable of addressing the unique challenges associated with assessing pain in clinical settings, where factors like motion artifacts, imbalanced label distribution, and sparse data further complicate pain monitoring.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This study aimed to develop and evaluate a multimodal machine learning–based framework for the objective assessment of pain in postoperative patients in real clinical settings using biosignals such as electrocardiogram, electromyogram, electrodermal activity, and respiration rate (RR) signals.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>The iHurt study was conducted on 25 postoperative patients at the University of California, Irvine Medical Center. The study captured multimodal biosignals during light physical activities, with concurrent self-reported pain levels using the Numerical Rating Scale. Data preprocessing involved noise filtering, feature extraction, and combining handcrafted and automatic features through convolutional and long short-term memory autoencoders. Machine learning classifiers, including support vector machine, random forest, adaptive boosting, and k-nearest neighbors, were trained using weak supervision and minority oversampling to handle sparse and imbalanced pain labels. Pain levels were categorized into baseline and 3 levels of pain intensity (1-3).</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The multimodal pain recognition models achieved an average balanced accuracy of over 80% across the different pain levels. RR models consistently outperformed other single modalities, particularly for lower pain intensities, while facial muscle activity (electromyogram) was most effective for distinguishing higher pain intensities. Although single-modality models, especially RR, generally provided higher performance compared to multimodal approaches, our multimodal framework still delivered results that surpassed most previous works in terms of overall accuracy.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>This study presents a novel, multimodal machine learning framework for objective pain recognition in postoperative patients. The results highlight the potential of integrating multiple biosignal modalities for more accurate pain assessment, with particular value in real-world clinical settings.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>pain intensity recognition</kwd>
        <kwd>multimodal information fusion</kwd>
        <kwd>signal processing</kwd>
        <kwd>weak supervision</kwd>
        <kwd>health care</kwd>
        <kwd>pain intensity</kwd>
        <kwd>pain recognition</kwd>
        <kwd>machine learning approach</kwd>
        <kwd>acute pain</kwd>
        <kwd>pain assessment</kwd>
        <kwd>behavioral pain</kwd>
        <kwd>pain measurement</kwd>
        <kwd>pain monitoring</kwd>
        <kwd>multimodal machine learning–based framework</kwd>
        <kwd>machine learning–based framework</kwd>
        <kwd>electrocardiogram</kwd>
        <kwd>electromyogram</kwd>
        <kwd>electrodermal activity</kwd>
        <kwd>self-reported pain level</kwd>
        <kwd>clinical pain management</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Pain is defined by the International Association for the Study of Pain as “an unpleasant sensory and emotional experience associated with actual or potential tissue damage or described in terms of such damage” [<xref ref-type="bibr" rid="ref1">1</xref>]. Pain is a unique phenomenon that individuals experience and perceive independently. Younger et al [<xref ref-type="bibr" rid="ref2">2</xref>] stated that pain is a subjective experience for which there is no current objective measure. Pain may be classified as either acute or chronic; Kent et al [<xref ref-type="bibr" rid="ref3">3</xref>] described acute pain as encompassing the immediate, time-limited bodily response to a noxious stimulus that triggers actions to avoid or mitigate ongoing injury. Chronic pain was first defined loosely by Bonica [<xref ref-type="bibr" rid="ref4">4</xref>] as pain that extends beyond an expected timeframe; currently, chronic pain is defined as “persistent or recurrent pain lasting longer than three months” [<xref ref-type="bibr" rid="ref5">5</xref>]. The focus of this paper is on acute pain.</p>
      <p>Acute pain is a common experience in the postanesthesia care unit in the immediate period following surgery. According to Chou et al [<xref ref-type="bibr" rid="ref6">6</xref>], pain occurs in 80% of patients following surgery, and 75% of patients with pain report their pain as either moderate, severe, or extreme. Current guidelines for the assessment of pain in the postanesthesia care unit recommend using a Numerical Rating Scale (NRS) or Verbal Rating Scale for patients who are sufficiently awake and coherent to reliably report pain scores [<xref ref-type="bibr" rid="ref7">7</xref>]. However, Herr et al [<xref ref-type="bibr" rid="ref8">8</xref>] identified several patient populations who are at risk for being incapable of providing self-report scores of pain; specifically, these populations include the pediatric population who have yet to develop adequate cognition; older patients with dementia; individuals with intellectual disabilities; and those who are unconscious, critically ill, or terminally ill. In these patient populations, Small and Laycock [<xref ref-type="bibr" rid="ref7">7</xref>] recommend the use of behavioral pain scales, such as the Pain Assessment in Advanced Dementia, Critical Care Pain Observation Tool (CPOT), or Behavioral Pain Scale. Despite the pain assessment measures of self-report and behavioral pain scales, each of these methods may be prone to biases. For example, Craig et al [<xref ref-type="bibr" rid="ref9">9</xref>] discussed how self-reporting might be a means to obtain a particular goal that can be influenced by the individual reporting pain. 
In addition, Hadjistavropoulos and Craig [<xref ref-type="bibr" rid="ref10">10</xref>] provided the Communications Model of Pain, which provided a basis for how expressive behaviors are decoded by observers of individuals in pain, which are influenced by the message clarity transmitted by the individual in pain as well as the unique biases (eg, knowledge level, assessment skills, and predisposing beliefs) of the individual assessing pain. The difficult nature of interpreting pain scores has resulted in disparities in pain management in minority populations, with research by Staton et al [<xref ref-type="bibr" rid="ref11">11</xref>] showing that the Black race is a significant predictor of the underestimation of pain by physicians.</p>
      <p>Multimodal pain assessment represents a potential method of circumventing the limitations of traditional self-report and behavioral pain assessment tools and an opportunity for enhancing pain assessment in vulnerable populations. Instead of having to rely on only one dimension of pain assessment, such as behaviors through the use of the CPOT or Behavioral Pain Scale, future multimodal pain assessment will incorporate physiological indicators, such as electrodermal activity (EDA), electrocardiogram (ECG), electroencephalogram, and electromyogram (EMG) as well as behaviors (eg, facial expression), and perhaps other as-yet undiscovered parameters to capture pain assessment in patient populations that might not be best represented by current assessment strategies. For example, a study by Gélinas et al [<xref ref-type="bibr" rid="ref12">12</xref>] found that revisions to the CPOT were necessary because some brain-injured patients may not exhibit certain behaviors that are contained in the CPOT. Similarly, for individuals diagnosed with dementia, Achterberg et al [<xref ref-type="bibr" rid="ref13">13</xref>] stated that there is a preponderance of observer-based pain assessment tools; however, these tools retain significant differences between them, as well as concerns for lack of reliability, validity, and sensitivity of change. Enhancing pain assessment through the combination of traditional pain assessment methods with novel multimodal approaches may serve to eventually enhance pain assessment in a greater majority of vulnerable patient populations.</p>
      <p>With the advent of connected Internet of Things devices and wearable sensor technology, automated data collection may achieve continuous pain intensity measurement. A significant amount of research has been conducted in recent years, which has sought to develop methods of continuous, automatic, and multimodal pain assessment. For example, previous work conducted by Walter et al [<xref ref-type="bibr" rid="ref14">14</xref>] and Werner et al [<xref ref-type="bibr" rid="ref15">15</xref>] used skin conductance level, ECG, electroencephalogram, and EMG to monitor pain in response to thermal pain. Other works, such as Hammal and Cohn [<xref ref-type="bibr" rid="ref16">16</xref>] and Werner et al [<xref ref-type="bibr" rid="ref17">17</xref>], have incorporated facial expression monitoring as an indicator of pain. While these studies were immensely beneficial to the scientific community in terms of their contributions to a better understanding of techniques to obtain continuous pain assessment, the setting of these experiments was in highly controlled laboratory environments with healthy participants. Collecting data in real-world situations as opposed to a laboratory setting would allow the researchers to assess a pain assessment technique’s potential in relation to actual pain brought about through a surgical procedure instead of induced pain.</p>
      <p>The aim of this study is to develop a robust and effective multimodal pain assessment framework for postoperative patients in real clinical settings. To the best of our knowledge, this is the first work proposing a multimodal pain assessment framework for postoperative patients. It should be noted that a pain assessment study on real patients is associated with several challenges (eg, imbalanced label distribution, missing data, motion artifacts, etc) since several parameters such as the intensity, distribution, frequency, and time of the pain as well as the environment cannot be controlled by researchers. Our main contributions are 4-fold:</p>
      <list list-type="order">
        <list-item>
          <p>We conducted a clinical study for multimodal signal acquisition from an acute pain unit of the University of California, Irvine Medical Center.</p>
        </list-item>
        <list-item>
          <p>We propose a multimodal pain assessment framework using our database (iHurt Pain DB) collected from postoperative patients while obtaining a higher accuracy compared to existing works on healthy participants [<xref ref-type="bibr" rid="ref17">17</xref>].</p>
        </list-item>
        <list-item>
          <p>We use both handcrafted and automatically generated features outputted from deep learning networks to build our models.</p>
        </list-item>
        <list-item>
          <p>We provide a novel method to mitigate the presence of sparse and imbalanced labels (due to the real clinical setting of the study) using weak supervision and minority oversampling.</p>
        </list-item>
      </list>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Overview</title>
        <p>Candidates were selected from the Acute Pain Service patient list at University of California Irvine Health in Orange, California. The Acute Pain Service unit at the medical center serves approximately 100 patients weekly, enabling the lead Doctor of Medicine to recruit patients. To our knowledge, this is the first study to collect biosignals from postoperative adult patients in hospitals. All participants (aged 23-89 years) were recruited to the study from July 2018 to October 2019.</p>
      </sec>
      <sec>
        <title>iHurt Study Design</title>
        <p>We conducted a biomedical data collection study on 25 postoperative patients reporting various degrees of pain symptoms. Multimodal biosignals (ECG, EMG, EDA, and photoplethysmography [PPG]) were collected from patients likely having mild to moderate pain who were asked to perform a few light physical activities while acquiring data. We also collected primary demographic information from each patient, including height, weight, sex, and BMI. All signals were collected using the iHurt system.</p>
      </sec>
      <sec>
        <title>iHurt System</title>
        <p>iHurt is a system that measures facial muscle activity (ie, changes in facial expression) in conjunction with physiological signals such as heart rate, heart rate variability, respiration rate (RR), and EDA for the purpose of developing an algorithm for pain assessment in hospitalized patients. The system uses the following 2 components to capture raw signals.</p>
        <sec>
          <title>Eight-Channel Biopotential Acquisition Device</title>
          <p>Our team at the University of Turku, Finland, developed a biopotential acquisition device to measure ECG and EMG signals. The device incorporates commercially available electrodes, electrode-to-device lead wires, an ADS1299-based portable device, and computer software (LabVIEW version 14.02f, National Instruments) to visualize data streaming from the portable device. Raw signals from the electrodes are sampled at 500 samples per second and are sent to the computer software through Bluetooth for visualization [<xref ref-type="bibr" rid="ref18">18</xref>].</p>
        </sec>
        <sec>
          <title>Empatica E4</title>
          <p>We use the commercially available Empatica E4 wristband (Empatica Inc) [<xref ref-type="bibr" rid="ref19">19</xref>] to measure EDA and PPG signals. The purpose of using a wristband was to allow our participants to move freely without any impediments. The Empatica E4 was connected to the participants’ phones over Bluetooth for visualization.</p>
          <p>We removed 3 participants’ data from the final dataset due to the presence of excessive motion artifacts. We also excluded 2 additional patients since they were wearing the Empatica E4 watch on their arm that received intravenous medication. This resulted in unreliable EDA signals due to conditions like skin rash and itching. This left us with data from 20 patients to build our pain recognition system. The dataset also contains rich annotation with self-reported pain scores based on the 11-point NRS from 0 to 10. A detailed explanation of the dataset and the study design can be found in Kasaeyan Naeini et al [<xref ref-type="bibr" rid="ref20">20</xref>]. We intend to make the deidentified dataset available to the research community for further analysis and applications.</p>
        </sec>
      </sec>
      <sec>
        <title>Data Processing Pipeline</title>
        <p>The first step in building our multimodal pain assessment system was to process the raw signals collected during trials. The data processing pipeline consisted of the following steps:</p>
        <list list-type="order">
          <list-item>
            <p>We filtered the signal to remove powerline interference, baseline wander, and motion artifact noise.</p>
          </list-item>
          <list-item>
            <p>We performed feature extraction on the filtered signals to obtain amplitude and variability features in the time domain. The time domain features were extracted using 5.5-second and 10-second windows. The 5.5-second window size was extracted to be compared with previous work [<xref ref-type="bibr" rid="ref17">17</xref>].</p>
          </list-item>
          <list-item>
            <p>In addition to handcrafted features, we also used automatic features, which were outputted from a deep neural network.</p>
          </list-item>
          <list-item>
            <p>Once the features were extracted, we tagged them with their corresponding labels based on the nearest timestamp of the label.</p>
          </list-item>
          <list-item>
            <p>Each of these processing steps was applied individually to each of the 4 modalities. Processed data from each of the modalities were combined using either early fusion or late fusion. The types of handcrafted features extracted from each modality and the deep learning pipeline for extracting automatic features are described in detail. An overview of our method is described as a flowchart in <xref rid="figure1" ref-type="fig">Figure 1</xref>.</p>
          </list-item>
        </list>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Overview of the proposed method. AdaBoost: adaptive boosting; EDA: electrodermal activity; EMG: electromyogram; ECG: electrocardiogram; ML: machine learning; PPG: photoplethysmography; SMOTE: synthetic minority oversampling technique; SVM: support vector machine; RF: random forest; KNN: k-nearest neighbors.</p>
          </caption>
          <graphic xlink:href="formative_v9i1e67969_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>ECG Handcrafted Features</title>
        <p>The ECG channel was filtered using a Butterworth band-pass filter with a frequency range of 0.1-250 Hz. The heart rate variability handcrafted features were extracted with <italic>pyHRV</italic>, an open-source Python (Python Software Foundation) toolbox [<xref ref-type="bibr" rid="ref21">21</xref>], using the R-peaks extracted from the ECG signal through a bidirectional long short-term memory (LSTM) network [<xref ref-type="bibr" rid="ref22">22</xref>]. These features were extracted from two window sizes, 5.5 and 10 seconds. There were 19 time-domain features. The time-domain features extracted from NN intervals, or the time interval between successive R-peaks, comprised the slope of these intervals, 5 statistical features (total count, mean, minimum, maximum, and SD), 9 difference features (mean difference, minimum difference, maximum difference, SD of successive interval differences, root mean square of successive interval differences, number of interval differences greater than 20 ms and 50 ms, and percentage of successive interval differences that differ by more than 20 ms and 50 ms), and 4 heart rate features (mean, minimum, maximum, and SD) [<xref ref-type="bibr" rid="ref23">23</xref>].</p>
      </sec>
      <sec>
        <title>EMG Handcrafted Features</title>
        <p>The preprocessing phase of EMG channels comprised a 20 Hz high pass filter and two notch filters at 50 Hz and 100 Hz, all using a Butterworth filter. Like ECG features, we extracted EMG features from 5.5- and 10-second windows on 5 different channels for each major facial muscle. The ten amplitude features extracted were (1) peak, (2) peak-to-peak mean value, (3) root mean squared, (4) mean of the absolute values of the second differences, (5) mean of the absolute values of the first differences, (6) mean of the absolute values of the second differences of the normalized signal, (7) mean of the absolute values of the first differences of the normalized signal, (8) mean of local minima values, (9) mean of local maxima values, and (10) mean of absolute values. The four variability features were (1) variance, (2) SD, (3) range, and (4) IQR. All 14 features were calculated for 5 different EMG channels, resulting in 70 EMG features in total.</p>
      </sec>
      <sec>
        <title>EDA Handcrafted Features</title>
        <p>We used the <italic>pyEDA</italic> library [<xref ref-type="bibr" rid="ref24">24</xref>] for preprocessing and feature extraction of EDA signals. In the preprocessing part, first, we used a moving average across a 1-second window to remove the motion artifacts and smooth the data [<xref ref-type="bibr" rid="ref25">25</xref>]. Second, a low-pass Butterworth filter on the phasic data was applied to remove the line noise. Finally, preprocessed EDA signals corresponding to each different pain level were visualized to ensure the validity of the signals. In the feature extraction part, the <italic>cvxEDA</italic> algorithm [<xref ref-type="bibr" rid="ref26">26</xref>] was used to extract the phasic component of EDA signals. The EDA signals’ peaks or bursts are considered variations in the phasic component of the signal. Therefore, the clean signals and extracted phasic component of signals were fed to the statistical feature extraction module to extract the number of peaks, the average value, and the maximum and minimum value of the signals. Furthermore, these extracted features were further used in the post–feature extraction module to extract eight more features: (1) the difference between the maximum and the minimum value of the signal, (2) the SD, (3) the difference between the upper and lower quartiles, (4) root mean square, (5) the mean value of local minima, (6) the mean value of local maxima, (7) the mean of the absolute values of the first differences, and (8) the mean of the absolute values of the second differences. This resulted in 12 EDA features in total.</p>
      </sec>
      <sec>
        <title>PPG-Based RR Handcrafted Features</title>
        <p>We preprocessed the PPG signal before extracting the RR from it. In total, 2 filters were used during the preprocessing [<xref ref-type="bibr" rid="ref27">27</xref>]. We first used a Butterworth band-pass filter to remove noises, including motion artifacts. Then, a moving average filter was implemented to smooth the PPG signal. After that, we applied an empirical mode decomposition–based method proposed by Madhav et al [<xref ref-type="bibr" rid="ref28">28</xref>] to derive respiration signals from filtered PPG signals. This method was proven to derive RR from a PPG signal with high accuracy (99.87%). A total of ten features were extracted from the respiratory signal, including (1) the number of inhale peaks, (2) the mean value of the signal, (3) the maximum value, (4) the minimum value, (5) the difference between the maximum and the minimum value, (6) SD, (7) the average value of the inhale peak intervals, (8) the SD of the inhale peak intervals, (9) the root mean square of successive differences between adjacent inhale peak intervals, and (10) SD of inhale duration. A visualization of the handcrafted feature pipeline is shown in <xref rid="figure2" ref-type="fig">Figure 2</xref>.</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Handcrafted feature extraction pipeline. dim.: dimension; ECG: electrocardiogram; EDA: electrodermal activity; EMG: electromyogram; PPG: photoplethysmography; RR: respiration rate.</p>
          </caption>
          <graphic xlink:href="formative_v9i1e67969_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Automatic Feature Extraction Pipeline</title>
        <p>As the dimensionality of biomedical data increases, it becomes increasingly difficult to train a machine learning algorithm on the entire uncompressed dataset. This often leads to a large training time and is computationally more expensive overall. A possible solution is to perform feature engineering to get a compressed and interpretable representation of the signal. Another alternative approach, however, is to use the compressed or latent representation of that data obtained from deep learning networks trained for that specific task. Using automatic features helps in dimensionality reduction and can provide us with a sophisticated yet succinct representation of the data that handcrafted features alone cannot provide. This automatic feature extraction is typically carried out by an autoencoder (AE) network, which is an unsupervised neural network that learns how to efficiently compress and encode the data into a lower-dimensional space [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref30">30</xref>]. AEs are composed of 2 separate networks: an encoder and a decoder. The encoder network acts as a bottleneck layer and maps the input into a lower-dimensional feature space. The decoder network tries to reconstruct this lower-dimensional feature vector into the original input size. The entire network is trained to minimize the reconstruction loss (ie, mean-squared error) by iteratively updating its weights and biases through backpropagation.</p>
        <p>A convolutional AE from the <italic>pyEDA</italic> library was used to extract automatic features. <xref rid="figure3" ref-type="fig">Figure 3</xref> shows the architecture of the AE. First, a linear layer (L1) is used to downsample the input signal with <italic>Input_Shape</italic> length to a length that is the closest power of 2 (CP2). This was done to make the model scalable to an arbitrary input size. The encoder half of the network consists of three 1D convolutional layers (C1, C2, and C3) and a linear layer (L2), which flattens and downsamples the input vector to a lower-dimensional latent vector. The number of dimensions of this latent vector (Feature Size) corresponds to the number of automatic features extracted and was set prior to training the network. A total of 32 features were extracted from ECG, EDA, and RR signals, whereas a total of 30 features were extracted from the EMG signal (6 features from each of the 5 channels). The decoder half of the network consists of three 1D deconvolutional layers (DeC1, DeC2, and DeC3) to reconstruct the input signal from the latent vector. A final linear layer (L3) is then used to flatten and reconstruct the signal to its original dimension. Both encoder and decoder networks have rectified linear unit activation between layers. Window sizes of both 5.5 and 10 seconds were applied to the filtered signals. This was done to compare the performance with handcrafted features. After signals from each of the modalities were normalized, they were trained on separate AE models for each modality. In addition to the convolutional AE, we also extracted features from an LSTM AE network. This resulted in two different feature extraction methods (convolutional and LSTM) that spanned two different window lengths (5.5 and 10 seconds).</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>The architecture of the pyEDA convolutional autoencoder.</p>
          </caption>
          <graphic xlink:href="formative_v9i1e67969_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>The batch size was set to 10, the number of training epochs was set to 100, and the ADAM optimizer [<xref ref-type="bibr" rid="ref28">28</xref>] was used with a learning rate of 1 × 10<sup>–3</sup>. A total of 126 feature vectors across all 4 modalities were extracted from each AE network. A visualization of our automatic feature extraction pipeline is shown in <xref rid="figure4" ref-type="fig">Figure 4</xref>.</p>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>Automatic feature extraction pipeline. dim.: dimension; ECG: electrocardiogram; EDA: electrodermal activity; EMG: electromyogram; RR: respiration rate; sec.: second.</p>
          </caption>
          <graphic xlink:href="formative_v9i1e67969_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Data Augmentation</title>
        <sec>
          <title>Overview</title>
          <p>There were several inherent challenges in the distribution of labels, as NRS values recorded during the clinical trials of this study were collected from real postoperative patients. This problem bears less significance while studying healthy participants since the stimulated pain can be controlled during the experiments. Consequently, occurrences of some pain levels far exceeded those of others. For example, among all patients, there were only 4 reported occurrences of pain level 10, whereas there were more than 80 reported occurrences of pain level 4. This imbalanced distribution was inevitable due to the subjective nature and the different sources of pain among the participants. Therefore, while downsampling our pain labels to 4 classes, thresholds for each downsampled class were carefully chosen to ensure a more evenly distributed set of labels. The pain levels ranged from a baseline (BL) level of pain or no pain to 3 increasing intensities of pain (PL 1-3). The thresholds for the pain levels were as follows: (1) PL1 ranged from 0 to 3, (2) PL2 ranged from 4 to 6, and (3) PL3 ranged from 7 to 10. All the ranges here are inclusive.</p>
          <p>Since we asked patients to report their pain levels only while they performed pain-inducing activities, the number of labels generated was sparse. Both handcrafted and automatic features were combined with the corresponding labels using timestamps that were within the nearest 5.5 or 10 seconds (labeling threshold) of the reported NRS value. This depended on the window size of the features extracted. Due to having sparse labels, many of the feature windows were not assigned a corresponding label. To mitigate the problem of having an imbalanced and sparse label distribution, 2 techniques were exploited:</p>
        </sec>
        <sec>
          <title>Minority Oversampling</title>
          <p>The first technique, called synthetic minority oversampling technique (SMOTE), is a type of data augmentation that oversamples the minority class [<xref ref-type="bibr" rid="ref31">31</xref>]. SMOTE works by first choosing a minority class instance at random and finding its k-nearest minority class neighbors. It then creates a synthetic example at a randomly selected point between two instances of the minority class in that feature space. The experiments involving SMOTE were implemented using the <italic>imbalanced-learn</italic> Python library [<xref ref-type="bibr" rid="ref32">32</xref>].</p>
        </sec>
        <sec>
          <title>Weak Supervision</title>
          <p>The second technique we used is weak supervision using the Snorkel framework [<xref ref-type="bibr" rid="ref33">33</xref>]. Rather than employing an expert to manually label the unlabeled instances, Snorkel allows its users to write labeling functions that can make use of heuristics, patterns, external knowledge bases, and third-party machine learning models. Weak supervision is typically employed to label large volumes of unlabeled data when there are noisy, limited, or imprecise sources. For our pain assessment algorithm, we decided to use third-party machine learning models to label the remaining unlabeled instances. All the data points that were within the labeling threshold were considered as “strong labels,” or ground-truth values collected from patients during trials. The remaining unlabeled data points were kept aside for Snorkel to provide a weakly supervised label. The strong labels were fed into Snorkel’s labeling function consisting of 3 off-the-shelf machine learning models: (1) a support-vector machine (SVM) with a radial basis function kernel, (2) a random forest (RF) classifier, and (3) a k-nearest neighbor (KNN) classifier with uniform weights. Once each model was trained on the strong labels, it was used to make predictions on the remaining unlabeled data. The predictions from these 3 models were collected and converted into a single confidence-weighted label per data point using Snorkel’s <italic>LabelModel</italic> function. This function outputs the most confident prediction as the label for each data point. To perform a fair assessment of the reliability and accuracy of our algorithm, we used SMOTE and Snorkel only while training our machine learning models. The performance of these models was measured solely on ground-truth (strong) labels collected during trials. This way, there is no implicit bias introduced from mislabeling or upsampling certain data points to skew model predictions.</p>
        </sec>
      </sec>
      <sec>
        <title>Multimodal Machine Learning Models</title>
        <p>To compare the performance of our multimodal machine learning models with the previous work, we performed binary classification using a leave-one-subject-out cross-validation approach [<xref ref-type="bibr" rid="ref34">34</xref>]. In this method, a model’s performance is validated over multiple folds in such a way that data from each patient are either in the training set or in the testing set. The purpose of using this method is to provide generalizability to unseen patients and to avoid overfitting by averaging the results over multiple folds. The eventual goal of this study is to build personalized models that make predictions on a single patient but learn from data collected from a larger population of similar patients. The following machine learning models were used to evaluate the performance of our pain assessment algorithm: (1) KNN, (2) RF classifier, (3) adaptive boosting (AdaBoost), and (4) an SVM. The models were then evaluated using leave-one-subject-out cross-validation. Four separate models were trained for each of the 3 pain intensities (eg, BL, or no pain, vs PL1, the lowest pain level; or BL vs PL3, the highest pain level).</p>
      </sec>
      <sec>
        <title>Fusing Modalities</title>
        <p>In total, 2 fusion approaches were used while combining features across different modalities. The first one is early or feature-level fusion, which concatenates feature vectors across different modalities based on their time stamps. The resulting data, which are now higher in dimension than any single modality, are then fed into our classifier to make predictions. While concatenating features across different modalities, a threshold of either 5.5 or 10 seconds was used to combine the modalities depending on the features extracted. The second approach was late or decision-level fusion, where each modality is fed to a separate classifier, and the final classification result is based on the fusion of outputs from the different modalities [<xref ref-type="bibr" rid="ref35">35</xref>].</p>
      </sec>
      <sec>
        <title>Feature Selection</title>
        <p>Since there were a lot of features generated during the data processing phase, we had to select a subset of the most informative features to build our models with. Therefore, to reduce the complexity and training time of the resulting model, feature selection using Gini importance was performed. Gini importance is a lightweight method that is simple and fast to compute. Since we extracted a relatively large number of features in our method, it made sense to use a computationally low-cost algorithm for feature selection. We computed the Gini importance of the features from the data in the training fold with the help of a random forest classifier and selected the top 25 features. We then trained our model on these top 25 features and evaluated them in the validation fold. Our proposed multimodal pain recognition system is shown in <xref rid="figure5" ref-type="fig">Figure 5</xref>.</p>
        <fig id="figure5" position="float">
          <label>Figure 5</label>
          <caption>
            <p>Proposed multimodal pain recognition system. BL: baseline; CNN: convolutional neural network; ECG: electrocardiogram; EDA: electrodermal activity; EMG: electromyogram; PL: pain level; RR: respiration rate; SMOTE: synthetic minority oversampling technique.</p>
          </caption>
          <graphic xlink:href="formative_v9i1e67969_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Ethical Considerations</title>
        <p>The dataset used in this study was originally collected with approval from the Institutional Review Board (IRB) at the University of California, Irvine (Protocol HS# 2017-3747). Participants provided written informed consent after receiving detailed oral and written explanations of the study’s objectives and procedures. They were encouraged to discuss participation with family and friends before consenting. Investigators ensured that all participants understood the study and had their questions answered prior to enrollment. Participants were informed of their right to withdraw at any time without impacting their care. For the secondary analysis conducted in this study, the IRB approval and original informed consent covered the reuse of the data, and no additional consent was required. All data utilized for this study were anonymized prior to analysis to protect participants’ identities. Personal identifiers, such as names and contact information, were removed, and access to the data was restricted to authorized personnel only. The anonymized data were stored securely in compliance with institutional and regulatory guidelines to ensure confidentiality. Participation in the original study was entirely voluntary, and no compensation was provided. This ensured that participants’ involvement was based solely on their willingness to contribute to the research.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Experimental Settings</title>
        <p>The goal of our experiments was to compare the performance of using only a single modality to build our models over using a combination of multiple modalities. We trained several different models for each of the pain intensities, which varied in the types of modalities, data augmentation techniques, machine learning models, and fusion techniques used. <xref rid="figure6" ref-type="fig">Figure 6</xref> shows the general pipeline of the experiments we conducted. We first selected the type of modalities to train on, which varied from only using each of the single modalities separately to using a combination of all 4 modalities. Furthermore, these modalities varied depending on the type of features used, like handcrafted or automatic features. In the case of using multiple modalities, we had 2 choices of fusion: early (<xref rid="figure6" ref-type="fig">Figure 6</xref>, left) and late (<xref rid="figure6" ref-type="fig">Figure 6</xref>, right). These architectures varied in how the modalities were combined, either before training (early) or at the decision level (late) after training using majority voting. The data preparation process involved feature selection and data augmentation. These models could either be trained with no data augmentation, with just SMOTE or Snorkel, or a combination of both. The last step of the pipeline before making predictions involved choosing the type of machine learning algorithms, like SVM, RF, AdaBoost, or KNN. Due to the lack of space, only the best-performing single and multimodal model configurations are mentioned in the section below.</p>
        <fig id="figure6" position="float">
          <label>Figure 6</label>
          <caption>
            <p>Our proposed general multimodal pipeline based on early fusion (left) and late fusion (right). AdaBoost: adaptive boosting; KNN: k-nearest neighbors; ML: machine learning; RF: random forest; SVM: support vector machine; SMOTE: synthetic minority oversampling technique.</p>
          </caption>
          <graphic xlink:href="formative_v9i1e67969_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Experimental Results</title>
        <p><xref ref-type="table" rid="table1">Tables 1</xref> and <xref ref-type="table" rid="table2">2</xref> present the best-performing single-modal and multimodal models for each of the 3 pain intensities. For comparison, the best multimodal results from Werner et al [<xref ref-type="bibr" rid="ref17">17</xref>], Lopez-Martinez and Picard [<xref ref-type="bibr" rid="ref36">36</xref>], Wang et al [<xref ref-type="bibr" rid="ref37">37</xref>], and Subramaniam and Dass [<xref ref-type="bibr" rid="ref38">38</xref>] are also mentioned. We use balanced accuracy as an evaluation criterion because our dataset had an imbalanced class distribution. Balanced accuracy is defined as the average of the true positive rate and the true negative rate.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Best scores: single modality versus multiple modalities.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="170"/>
            <col width="170"/>
            <col width="170"/>
            <col width="170"/>
            <col width="160"/>
            <col width="160"/>
            <thead>
              <tr valign="top">
                <td>Pain levels</td>
                <td>ECG<sup>a</sup> scores</td>
                <td>EMG<sup>b</sup> scores</td>
                <td>EDA<sup>c</sup> scores</td>
                <td>RR<sup>d</sup> scores</td>
                <td>Multiple modality</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>BL<sup>e</sup> vs PL<sup>f</sup>1</td>
                <td>82.14</td>
                <td>86</td>
                <td>79.18</td>
                <td>84.62</td>
                <td>82.14</td>
              </tr>
              <tr valign="top">
                <td>BL vs PL2</td>
                <td>86.11</td>
                <td>84.53</td>
                <td>82.94</td>
                <td>88.24</td>
                <td>86.11</td>
              </tr>
              <tr valign="top">
                <td>BL vs PL3</td>
                <td>75</td>
                <td>78.12</td>
                <td>75</td>
                <td>76.23</td>
                <td>75</td>
              </tr>
              <tr valign="top">
                <td>Mean (SD)</td>
                <td>81.08 (5.03)</td>
                <td>82.8 (5.03)</td>
                <td>79.04 (5.03)</td>
                <td>83.03 (5.03)</td>
                <td>81.08 (4.59)</td>
              </tr>
              <tr valign="top">
                <td>Classifier configuration</td>
                <td>LSTM<sup>g</sup> AE<sup>h</sup> (10 s), Strong, SVM<sup>j</sup></td>
                <td>HC<sup>i</sup> (10 s), Snorkel, SVM</td>
                <td>CNN<sup>k</sup> AE (10 s), Strong, SVM</td>
                <td>HC (10 s), Strong, SVM</td>
                <td>EF<sup>l</sup>, LSTM AE (10 s), Strong, SVM</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>ECG: electrocardiogram.</p>
            </fn>
            <fn id="table1fn2">
              <p><sup>b</sup>EMG: electromyogram.</p>
            </fn>
            <fn id="table1fn3">
              <p><sup>c</sup>EDA: electrodermal activity.</p>
            </fn>
            <fn id="table1fn4">
              <p><sup>d</sup>RR: respiration rate.</p>
            </fn>
            <fn id="table1fn5">
              <p><sup>e</sup>BL: baseline.</p>
            </fn>
            <fn id="table1fn6">
              <p><sup>f</sup>PL: pain level.</p>
            </fn>
            <fn id="table1fn7">
              <p><sup>g</sup>LSTM: long short-term memory.</p>
            </fn>
            <fn id="table1fn8">
              <p><sup>h</sup>AE: autoencoder.</p>
            </fn>
            <fn id="table1fn9">
              <p><sup>i</sup>HC: handcrafted.</p>
            </fn>
            <fn id="table1fn10">
              <p><sup>j</sup>SVM: support vector machine.</p>
            </fn>
            <fn id="table1fn11">
              <p><sup>k</sup>CNN: convolutional neural network.</p>
            </fn>
            <fn id="table1fn12">
              <p><sup>l</sup>EF: early fusion.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Multiple modalities: comparison with other methods.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="340"/>
            <col width="330"/>
            <col width="330"/>
            <thead>
              <tr valign="top">
                <td>Study</td>
                <td>Value, mean (SD)</td>
                <td>Modalities</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Werner et al [<xref ref-type="bibr" rid="ref17">17</xref>]</td>
                <td>65.02 (8.72)</td>
                <td>Video, ECG<sup>a</sup>, EMG<sup>b</sup>, and EDA<sup>c</sup></td>
              </tr>
              <tr valign="top">
                <td>Lopez-Martinez and Picard [<xref ref-type="bibr" rid="ref36">36</xref>]</td>
                <td>66.68 (10.87)</td>
                <td>ECG and EDA</td>
              </tr>
              <tr valign="top">
                <td>Wang et al [<xref ref-type="bibr" rid="ref37">37</xref>]</td>
                <td>70.4 (9.76)</td>
                <td>ECG, EMG, and EDA</td>
              </tr>
              <tr valign="top">
                <td>Subramaniam and Dass [<xref ref-type="bibr" rid="ref38">38</xref>]</td>
                <td>92.604 (3.49)</td>
                <td>ECG and EDA</td>
              </tr>
              <tr valign="top">
                <td>Our method</td>
                <td>81.08 (4.59)</td>
                <td>ECG, EMG, EDA, and RR<sup>d</sup></td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table2fn1">
              <p><sup>a</sup>ECG: electrocardiogram.</p>
            </fn>
            <fn id="table2fn2">
              <p><sup>b</sup>EMG: electromyogram.</p>
            </fn>
            <fn id="table2fn3">
              <p><sup>c</sup>EDA: electrodermal activity.</p>
            </fn>
            <fn id="table2fn4">
              <p><sup>d</sup>RR: respiration rate.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>This study demonstrated that RR emerged as the strongest single-modality predictor of pain intensity, particularly for distinguishing between baseline and lower pain levels. EMG performed best for higher pain intensities, while EDA and ECG showed comparatively lower effectiveness as stand-alone modalities. Multimodal models, though offering potential advantages in robustness and complementary information, generally underperformed compared with the RR single-modality models, likely due to challenges related to noise and data alignment. The study highlights the importance of modality selection and data fusion strategies for pain recognition in postoperative settings.</p>
        <sec>
          <title>Performance by Modality</title>
          <sec>
            <title>Pain Recognition Using RR Alone</title>
            <p>From the single-modality results (<xref ref-type="table" rid="table1">Table 1</xref>), it is evident that RR models outperform all other modalities, especially for the BL versus PL1 and BL versus PL2 models. This highlights the strong predictive power of RR in distinguishing between baseline and lower pain intensities. The best-performing model used RR alone. One justification for these results could be the dynamic nature of RR signals in response to pain stimuli. Since we effectively isolated and captured periods of higher pain intensity with smaller window sizes, this could have helped the models better distinguish between baseline and other pain levels.</p>
          </sec>
          <sec>
            <title>Pain Recognition Using EMG Alone</title>
            <p>For the highest pain category (BL vs PL3), the EMG model outperformed other single-modality models. This suggests that facial muscle activation captured by EMG signals is particularly effective for distinguishing higher pain intensities. The comparatively lower performance of other modalities, such as EDA, could be attributed to the subtle variations in their responses to different pain levels.</p>
          </sec>
          <sec>
            <title>Pain Recognition Using EDA Alone</title>
            <p>EDA models exhibited comparatively lower performance across all pain categories. This may be due to the difficulty in capturing clear variations in EDA signal responses to different pain levels, as observed in our experiments.</p>
          </sec>
          <sec>
            <title>Pain Recognition Using ECG Alone</title>
            <p>While ECG features contributed strongly to the performance of multimodal models, their single-modality results were not as robust as those of RR or EMG. However, the best-performing multimodal models shared identical configurations with the best ECG models, suggesting that ECG features had a significant influence on the multimodal results.</p>
          </sec>
        </sec>
        <sec>
          <title>Challenges With Extremes in Pain Levels</title>
          <p>The BL versus PL1 and BL versus PL3 models had relatively poor performances across both single and multimodal approaches. BL versus PL1 struggled to distinguish the baseline from the lowest pain intensity due to the subtlety of the physiological responses collected while experiencing this pain level. The BL versus PL3 model, however, found it challenging to distinguish pain levels due to the scarcity of labels for the highest pain intensity. Although data augmentation can help mitigate these challenges, there is no substitute for real data. On the contrary, the BL versus PL2 models performed better due to the relative abundance of such labels reported during trials.</p>
        </sec>
        <sec>
          <title>Multimodal Performance</title>
          <p>The best-performing multimodal model was trained on automatic features outputted from an LSTM network with a 10-second window size. This model, which made use of strong labels without any data augmentation techniques, achieved comparable results to the best-performing ECG single-modality model. Early fusion outperformed late fusion, likely due to its ability to detect correlations across modalities during feature selection [<xref ref-type="bibr" rid="ref39">39</xref>]. By treating each modality as independent, late fusion might lose correlations in the combined feature space.</p>
          <p>However, single-modality models, particularly RR, generally outperformed multimodal models. This contrasts with previous studies on healthy participants, where multimodal approaches typically excelled. Our findings suggest that the unique challenges of real-world postoperative data, including noise and missing signals, may complicate the integration of multiple modalities.</p>
        </sec>
        <sec>
          <title>Advantages and Trade-Offs</title>
          <p>While multimodal models have the potential to add complementary information and robustness, they also introduce challenges related to data alignment and noise management. Single-modality models, by contrast, are simpler, easier to interpret, and computationally less expensive. These advantages make single modalities, such as RR and EMG, attractive for certain applications despite the overall potential of multimodal approaches. Multiple modalities certainly have the potential to add more useful information over a single modality and can be used to introduce complementary information and resiliency when any one modality fails or is too noisy [<xref ref-type="bibr" rid="ref40">40</xref>].</p>
          <p>While comparing our results to previous studies [<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref36">36</xref>-<xref ref-type="bibr" rid="ref38">38</xref>] in <xref ref-type="table" rid="table2">Table 2</xref>, it can be observed that our models outperform most of their models in mean pain assessment scores except Subramaniam and Dass [<xref ref-type="bibr" rid="ref38">38</xref>]. However, this is not entirely a fair comparison because we use 3 pain levels instead of 4, and our patients are not healthy.</p>
          <p>An additional consideration is the comfort and compliance of patients wearing multiple biosensors, especially in postoperative settings. While multimodal models rely on multiple sources of data, this could pose a burden to patients who may already be experiencing discomfort. Future iterations of the framework could focus on optimizing the number of biosensors by identifying the most informative modalities. This optimization could improve patient compliance while maintaining the accuracy and robustness of the system.</p>
        </sec>
        <sec>
          <title>Limitations</title>
          <p>The main limitation of our algorithm is the presence of noise in the form of motion artifacts produced while collecting physiological signals. Since we obtained data from real postoperative patients in a clinical setting, they were allowed to move more freely compared to experiments performed in controlled laboratory settings. The presence of these motion artifacts diminished the quality of our data, thus negatively impacting our machine learning algorithms.</p>
          <p>In addition, our study was conducted in a setting with a limited and relatively homogeneous patient population. While this setting allowed us to focus on developing and testing the algorithm, it restricts the generalizability of our findings to broader and more diverse clinical environments. Testing the model in varied clinical settings and across a larger, more diverse patient population is essential for evaluating its scalability and effectiveness in real-world scenarios. This remains an important future research direction.</p>
          <p>Furthermore, we must acknowledge the more complicated facets of pain that are not fully captured by our algorithm, such as the number of days after surgery, the amount of pain medication administered, and the location and type of pain experienced. Incorporating these factors in future studies could improve the accuracy and robustness of pain assessment systems.</p>
        </sec>
      </sec>
      <sec>
        <title>Future Directions</title>
        <p>One of the main research directions we would like to explore is the development of real-time multimodal pain assessment systems using deep learning architectures. In such scenarios, missing or incomplete data from one or more modalities are likely to be encountered. Real-time systems also face limitations related to computational complexity and power constraints. Building on the experiments conducted in this study, we aim to create models capable of dynamically determining which modalities to use in an energy-efficient manner without compromising performance given the clinical context.</p>
        <p>In addition, a promising avenue for future work is to build personalized machine learning models. These models could leverage data from groups of similar patients while being fine-tuned to make predictions for individual patients. This personalized approach accounts for the large interindividual variability in pain perception, which makes a monolithic model unsuitable. Previous research has demonstrated the feasibility of using multitask machine learning to address variability in mood prediction tasks [<xref ref-type="bibr" rid="ref41">41</xref>]. This strategy could be extended to the domain of pain assessment, not only for acute postoperative pain but also for chronic pain scenarios. Personalized modeling will be a vital step toward creating clinically viable and effective pain assessment algorithms.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>In this paper, we presented a multimodal machine learning framework for classifying pain in real postoperative patients using the iHurt Pain Database. Both traditional handcrafted features and deep learning–generated automatic features were extracted from physiological signals (ECG, EDA, EMG, and PPG). Several experiments were conducted to perform binary classification among 3 different pain intensities versus baseline levels of pain. Models were varied based on the modalities used, the data augmentation techniques applied (SMOTE, Snorkel, or both), the machine learning algorithms used, and the modality fusion methods implemented.</p>
        <p>Our results showed that binary pain classification significantly benefits from the application of data augmentation techniques in conjunction with automatic features. The single-modality models based on RR and EMG outperformed the multimodal models. The BL versus PL3 model with the best results was trained on EMG data alone, highlighting the importance of facial muscle activation in distinguishing higher pain intensities from baseline levels. This finding is consistent from a clinical perspective, as higher pain intensities are commonly associated with acute pain.</p>
        <p>Overall, this study highlights a novel approach to addressing the challenges of building a pain recognition system for real postoperative patients, particularly constraints such as label imbalances and missing data. By employing robust data preprocessing techniques, data augmentation strategies, and multimodal fusion approaches, our framework demonstrates the potential for accurate and objective pain classification in clinical settings. These findings lay the groundwork for advancing multimodal pain assessment methods tailored to real-world clinical scenarios.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AdaBoost</term>
          <def>
            <p>adaptive boosting</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">AE</term>
          <def>
            <p>autoencoder</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">CNN</term>
          <def>
            <p>convolutional neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">CPOT</term>
          <def>
            <p>Critical Care Pain Observation Tool</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">ECG</term>
          <def>
            <p>electrocardiogram</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">EDA</term>
          <def>
            <p>electrodermal activity</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">EMG</term>
          <def>
            <p>electromyogram</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">KNN</term>
          <def>
            <p>k-nearest neighbors</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">LSTM</term>
          <def>
            <p>long short-term memory</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">NRS</term>
          <def>
            <p>Numerical Rating Scale</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">PPG</term>
          <def>
            <p>photoplethysmography</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb12">RF</term>
          <def>
            <p>random forest</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb13">RR</term>
          <def>
            <p>respiration rate</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb14">SMOTE</term>
          <def>
            <p>synthetic minority oversampling technique</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb15">SVM</term>
          <def>
            <p>support vector machine</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Merskey</surname>
              <given-names>HA</given-names>
            </name>
          </person-group>
          <article-title>Pain terms: a list with definitions and notes on usage. Recommended by the IASP subcommittee on taxonomy</article-title>
          <source>Pain</source>
          <year>1979</year>
          <volume>6</volume>
          <issue>3</issue>
          <fpage>249</fpage>
          <pub-id pub-id-type="medline">460932</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Younger</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>McCue</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Mackey</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Pain outcomes: a brief review of instruments and techniques</article-title>
          <source>Curr Pain Headache Rep</source>
          <year>2009</year>
          <volume>13</volume>
          <issue>1</issue>
          <fpage>39</fpage>
          <lpage>43</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/19126370"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s11916-009-0009-x</pub-id>
          <pub-id pub-id-type="medline">19126370</pub-id>
          <pub-id pub-id-type="pmcid">PMC2891384</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kent</surname>
              <given-names>ML</given-names>
            </name>
            <name name-style="western">
              <surname>Tighe</surname>
              <given-names>PJ</given-names>
            </name>
            <name name-style="western">
              <surname>Belfer</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Brennan</surname>
              <given-names>TJ</given-names>
            </name>
            <name name-style="western">
              <surname>Bruehl</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Brummett</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Buckenmaier</surname>
              <given-names>CC</given-names>
            </name>
            <name name-style="western">
              <surname>Buvanendran</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Cohen</surname>
              <given-names>RI</given-names>
            </name>
            <name name-style="western">
              <surname>Desjardins</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Edwards</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Fillingim</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Gewandter</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Gordon</surname>
              <given-names>DB</given-names>
            </name>
            <name name-style="western">
              <surname>Hurley</surname>
              <given-names>RW</given-names>
            </name>
            <name name-style="western">
              <surname>Kehlet</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Loeser</surname>
              <given-names>JD</given-names>
            </name>
            <name name-style="western">
              <surname>Mackey</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>McLean</surname>
              <given-names>SA</given-names>
            </name>
            <name name-style="western">
              <surname>Polomano</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Rahman</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Raja</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Rowbotham</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Suresh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Schachtel</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Schreiber</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Schumacher</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Stacey</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Stanos</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Todd</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Turk</surname>
              <given-names>DC</given-names>
            </name>
            <name name-style="western">
              <surname>Weisman</surname>
              <given-names>SJ</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Carr</surname>
              <given-names>DB</given-names>
            </name>
            <name name-style="western">
              <surname>Dworkin</surname>
              <given-names>RH</given-names>
            </name>
            <name name-style="western">
              <surname>Terman</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>The ACTTION-APS-AAPM pain taxonomy (AAAPT) multidimensional approach to classifying acute pain conditions</article-title>
          <source>Pain Med</source>
          <year>2017</year>
          <volume>18</volume>
          <issue>5</issue>
          <fpage>947</fpage>
          <lpage>958</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/28482098"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/pm/pnx019</pub-id>
          <pub-id pub-id-type="medline">28482098</pub-id>
          <pub-id pub-id-type="pii">3798726</pub-id>
          <pub-id pub-id-type="pmcid">PMC5431381</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bonica</surname>
              <given-names>JJ</given-names>
            </name>
          </person-group>
          <article-title>Management of cancer pain</article-title>
          <source>Pain in the Cancer Patient: Pathogenesis, Diagnosis and Therapy</source>
          <year>1984</year>
          <publisher-loc>Berlin, Germany</publisher-loc>
          <publisher-name>Springer</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Treede</surname>
              <given-names>RD</given-names>
            </name>
            <name name-style="western">
              <surname>Rief</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Barke</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Aziz</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Bennett</surname>
              <given-names>MI</given-names>
            </name>
            <name name-style="western">
              <surname>Benoliel</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Cohen</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Evers</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Finnerup</surname>
              <given-names>NB</given-names>
            </name>
            <name name-style="western">
              <surname>First</surname>
              <given-names>MB</given-names>
            </name>
            <name name-style="western">
              <surname>Giamberardino</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Kaasa</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kosek</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Lavand'homme</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Nicholas</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Perrot</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Scholz</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Schug</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>BH</given-names>
            </name>
            <name name-style="western">
              <surname>Svensson</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Vlaeyen</surname>
              <given-names>JWS</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>SJ</given-names>
            </name>
          </person-group>
          <article-title>A classification of chronic pain for ICD-11</article-title>
          <source>Pain</source>
          <year>2015</year>
          <volume>156</volume>
          <issue>6</issue>
          <fpage>1003</fpage>
          <lpage>1007</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/25844555"/>
          </comment>
          <pub-id pub-id-type="doi">10.1097/j.pain.0000000000000160</pub-id>
          <pub-id pub-id-type="medline">25844555</pub-id>
          <pub-id pub-id-type="pii">00006396-201506000-00006</pub-id>
          <pub-id pub-id-type="pmcid">PMC4450869</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chou</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Gordon</surname>
              <given-names>DB</given-names>
            </name>
            <name name-style="western">
              <surname>de Leon-Casasola</surname>
              <given-names>OA</given-names>
            </name>
            <name name-style="western">
              <surname>Rosenberg</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Bickler</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Brennan</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Carter</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Cassidy</surname>
              <given-names>CL</given-names>
            </name>
            <name name-style="western">
              <surname>Chittenden</surname>
              <given-names>EH</given-names>
            </name>
            <name name-style="western">
              <surname>Degenhardt</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Griffith</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Manworren</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>McCarberg</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Montgomery</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Murphy</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Perkal</surname>
              <given-names>MF</given-names>
            </name>
            <name name-style="western">
              <surname>Suresh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sluka</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Strassels</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Thirlby</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Viscusi</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Walco</surname>
              <given-names>GA</given-names>
            </name>
            <name name-style="western">
              <surname>Warner</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Weisman</surname>
              <given-names>SJ</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>CL</given-names>
            </name>
          </person-group>
          <article-title>Management of postoperative pain: a clinical practice guideline from the American pain society, the American society of regional anesthesia and pain medicine, and the American society of anesthesiologists' committee on regional anesthesia, executive committee, and administrative council</article-title>
          <source>J Pain</source>
          <year>2016</year>
          <volume>17</volume>
          <issue>2</issue>
          <fpage>131</fpage>
          <lpage>157</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1526-5900(15)00995-5"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jpain.2015.12.008</pub-id>
          <pub-id pub-id-type="medline">26827847</pub-id>
          <pub-id pub-id-type="pii">S1526-5900(15)00995-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Small</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Laycock</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Acute postoperative pain management</article-title>
          <source>Br J Surg</source>
          <year>2020</year>
          <volume>107</volume>
          <issue>2</issue>
          <fpage>e70</fpage>
          <lpage>e80</lpage>
          <pub-id pub-id-type="doi">10.1002/bjs.11477</pub-id>
          <pub-id pub-id-type="medline">31903595</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Herr</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Coyne</surname>
              <given-names>PJ</given-names>
            </name>
            <name name-style="western">
              <surname>Ely</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Gélinas</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Manworren</surname>
              <given-names>RCB</given-names>
            </name>
          </person-group>
          <article-title>Pain assessment in the patient unable to self-report: clinical practice recommendations in support of the ASPMN 2019 position statement</article-title>
          <source>Pain Manag Nurs</source>
          <year>2019</year>
          <volume>20</volume>
          <issue>5</issue>
          <fpage>404</fpage>
          <lpage>417</lpage>
          <pub-id pub-id-type="doi">10.1016/j.pmn.2019.07.005</pub-id>
          <pub-id pub-id-type="medline">31610992</pub-id>
          <pub-id pub-id-type="pii">S1524-9042(19)30162-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Craig</surname>
              <given-names>KD</given-names>
            </name>
          </person-group>
          <article-title>The facial expression of pain: better than a thousand words?</article-title>
          <source>APS Journal</source>
          <year>1992</year>
          <month>9</month>
          <volume>1</volume>
          <issue>3</issue>
          <fpage>153</fpage>
          <lpage>162</lpage>
          <pub-id pub-id-type="doi">10.1016/1058-9139(92)90001-S</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hadjistavropoulos</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Craig</surname>
              <given-names>KD</given-names>
            </name>
          </person-group>
          <article-title>A theoretical framework for understanding self-report and observational measures of pain: a communications model</article-title>
          <source>Behav Res Ther</source>
          <year>2002</year>
          <volume>40</volume>
          <issue>5</issue>
          <fpage>551</fpage>
          <lpage>570</lpage>
          <pub-id pub-id-type="doi">10.1016/s0005-7967(01)00072-9</pub-id>
          <pub-id pub-id-type="medline">12038648</pub-id>
          <pub-id pub-id-type="pii">S0005-7967(01)00072-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Staton</surname>
              <given-names>LJ</given-names>
            </name>
            <name name-style="western">
              <surname>Panda</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Genao</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Kurz</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Pasanen</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mechaber</surname>
              <given-names>AJ</given-names>
            </name>
            <name name-style="western">
              <surname>Menon</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>O'Rorke</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wood</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Rosenberg</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Faeslis</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Carey</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Calleson</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Cykert</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>When race matters: disagreement in pain perception between patients and their physicians in primary care</article-title>
          <source>J Natl Med Assoc</source>
          <year>2007</year>
          <volume>99</volume>
          <issue>5</issue>
          <fpage>532</fpage>
          <lpage>538</lpage>
          <pub-id pub-id-type="medline">17534011</pub-id>
          <pub-id pub-id-type="pmcid">PMC2576060</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gélinas</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Boitor</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Puntillo</surname>
              <given-names>KA</given-names>
            </name>
            <name name-style="western">
              <surname>Arbour</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Topolovec-Vranic</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Cusimano</surname>
              <given-names>MD</given-names>
            </name>
            <name name-style="western">
              <surname>Choinière</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Streiner</surname>
              <given-names>DL</given-names>
            </name>
          </person-group>
          <article-title>Behaviors indicative of pain in brain-injured adult patients with different levels of consciousness in the intensive care unit</article-title>
          <source>J Pain Symptom Manage</source>
          <year>2019</year>
          <volume>57</volume>
          <issue>4</issue>
          <fpage>761</fpage>
          <lpage>773</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S0885-3924(18)31481-7"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jpainsymman.2018.12.333</pub-id>
          <pub-id pub-id-type="medline">30593909</pub-id>
          <pub-id pub-id-type="pii">S0885-3924(18)31481-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Achterberg</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Lautenbacher</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Husebo</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Erdal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Herr</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Pain in dementia</article-title>
          <source>Schmerz</source>
          <year>2021</year>
          <volume>35</volume>
          <issue>2</issue>
          <fpage>130</fpage>
          <lpage>138</lpage>
          <pub-id pub-id-type="doi">10.1007/s00482-020-00501-w</pub-id>
          <pub-id pub-id-type="medline">32926239</pub-id>
          <pub-id pub-id-type="pii">10.1007/s00482-020-00501-w</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Walter</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gruss</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ehleiter</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Tan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Traue</surname>
              <given-names>HC</given-names>
            </name>
            <name name-style="western">
              <surname>Werner</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Hamadi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Crawcour</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Andrade</surname>
              <given-names>AO</given-names>
            </name>
            <name name-style="western">
              <surname>da Silva</surname>
              <given-names>GM</given-names>
            </name>
          </person-group>
          <article-title>The biovid heat pain database data for the advancement and systematic validation of an automated pain recognition system</article-title>
          <year>2013</year>
          <conf-name>Proceedings of the IEEE International Conference on Cybernetics (CYBCO)</conf-name>
          <conf-date>Jun 13-15, 2013</conf-date>
          <conf-loc>Lausanne, Switzerland</conf-loc>
          <fpage>128</fpage>
          <lpage>131</lpage>
          <pub-id pub-id-type="doi">10.1109/CYBConf.2013.6617456</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Werner</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Hamadi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Niese</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Walter</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gruss</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Traue</surname>
              <given-names>HC</given-names>
            </name>
          </person-group>
          <article-title>Towards pain monitoring: Facial expression, head pose, a new database, an automatic system and remaining challenges</article-title>
          <year>2013</year>
          <conf-name>Proceedings of the British Machine Vision Conference</conf-name>
          <conf-date>September 01, 2013</conf-date>
          <conf-loc>London, UK</conf-loc>
          <fpage>1</fpage>
          <lpage>13</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://citeseerx.ist.psu.edu/document?repid=rep1&#38;type=pdf&#38;doi=03f075e95638bc66e687badd97a58c5de67e58e6"/>
          </comment>
          <pub-id pub-id-type="doi">10.5244/C.27.119</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hammal</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Cohn</surname>
              <given-names>JF</given-names>
            </name>
          </person-group>
          <article-title>Automatic detection of pain intensity</article-title>
          <source>Proc ACM Int Conf Multimodal Interact</source>
          <year>2012</year>
          <volume>2012</volume>
          <fpage>47</fpage>
          <lpage>52</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32724903"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/2388676.2388688</pub-id>
          <pub-id pub-id-type="medline">32724903</pub-id>
          <pub-id pub-id-type="pmcid">PMC7385931</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Werner</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Hamadi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Niese</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Walter</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gruss</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Traue</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Automatic pain recognition from video and biomedical signals</article-title>
          <year>2014</year>
          <conf-name>Proceedings of the 22nd International Conference on Pattern Recognition</conf-name>
          <conf-date>August 24-28, 2014</conf-date>
          <conf-loc>Stockholm, Sweden</conf-loc>
          <fpage>4582</fpage>
          <lpage>4587</lpage>
          <pub-id pub-id-type="doi">10.1109/ICPR.2014.784</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sarker</surname>
              <given-names>VK</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gia</surname>
              <given-names>TN</given-names>
            </name>
            <name name-style="western">
              <surname>Anzanpour</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rahmani</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Liljeberg</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Portable multipurpose bio-signal acquisition and wireless streaming device for wearables</article-title>
          <year>2017</year>
          <conf-name>Proceedings of the IEEE Sensors Applications Symposium (SAS)</conf-name>
          <conf-date>March 13, 2017</conf-date>
          <conf-loc>New York City, NY</conf-loc>
          <fpage>1</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1109/SAS.2017.7894053</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="web">
          <source>Empatica E4 User Manual</source>
          <year>2015</year>
          <access-date>2024-12-13</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.manualslib.com/manual/1318955/Empatica-E4.html">https://www.manualslib.com/manual/1318955/Empatica-E4.html</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kasaeyan Naeini</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Syrjälä</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Calderon</surname>
              <given-names>MD</given-names>
            </name>
            <name name-style="western">
              <surname>Mieronkoski</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Dutt</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Liljeberg</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Salanterä</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Nelson</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Rahmani</surname>
              <given-names>AM</given-names>
            </name>
          </person-group>
          <article-title>Prospective study evaluating a pain assessment tool in a postoperative environment: protocol for algorithm testing and enhancement</article-title>
          <source>JMIR Res Protoc</source>
          <year>2020</year>
          <volume>9</volume>
          <issue>7</issue>
          <fpage>e17783</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.researchprotocols.org/2020/7/e17783/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/17783</pub-id>
          <pub-id pub-id-type="medline">32609091</pub-id>
          <pub-id pub-id-type="pii">v9i7e17783</pub-id>
          <pub-id pub-id-type="pmcid">PMC7367536</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gomes</surname>
              <given-names>PM</given-names>
            </name>
            <name name-style="western">
              <surname>Margaritoff</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Silva</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>pyHRV: development and evaluation of an open-source Python toolbox for heart rate variability (HRV)</article-title>
          <year>2019</year>
          <conf-name>Proceedings of the International Conference on Electrical, Electronic and Computing Engineering (IcETRAN)</conf-name>
          <conf-date>June 01, 2019</conf-date>
          <conf-loc>Serbia</conf-loc>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.researchgate.net/publication/333611305_pyHRV_Development_and_Evaluation_of_an_Open-Source_Python_Toolbox_for_Heart_Rate_Variability_HRV"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Laitala</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Jiang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Syrjälä</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Naeini</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Airola</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rahmani</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Dutt</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Liljeberg</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Robust ECG R-peak detection using LSTM</article-title>
          <year>2020</year>
          <conf-name>Proceedings of the 35th Annual ACM Symposium on Applied Computing</conf-name>
          <conf-date>March 30, 2020</conf-date>
          <conf-loc>Brno, Czech Republic</conf-loc>
          <fpage>1104</fpage>
          <lpage>1111</lpage>
          <pub-id pub-id-type="doi">10.1145/3341105.3373945</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kasaeyan Naeini</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Subramanian</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Calderon</surname>
              <given-names>MD</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Dutt</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Liljeberg</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Salantera</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Nelson</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Rahmani</surname>
              <given-names>AM</given-names>
            </name>
          </person-group>
          <article-title>Pain recognition with electrocardiographic features in postoperative patients: method validation study</article-title>
          <source>J Med Internet Res</source>
          <year>2021</year>
          <volume>23</volume>
          <issue>5</issue>
          <fpage>e25079</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2021/5/e25079/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/25079</pub-id>
          <pub-id pub-id-type="medline">34047710</pub-id>
          <pub-id pub-id-type="pii">v23i5e25079</pub-id>
          <pub-id pub-id-type="pmcid">PMC8196363</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Aqajari</surname>
              <given-names>SAH</given-names>
            </name>
            <name name-style="western">
              <surname>Naeini</surname>
              <given-names>EK</given-names>
            </name>
            <name name-style="western">
              <surname>Mehrabadi</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Labbaf</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Dutt</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Rahmani</surname>
              <given-names>AM</given-names>
            </name>
          </person-group>
          <article-title>pyEDA: an open-source Python toolkit for pre-processing and feature extraction of electrodermal activity</article-title>
          <source>Procedia Comput Sci</source>
          <year>2021</year>
          <volume>184</volume>
          <fpage>99</fpage>
          <lpage>106</lpage>
          <pub-id pub-id-type="doi">10.1016/j.procs.2021.03.021</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Aqajari</surname>
              <given-names>SAH</given-names>
            </name>
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Kasaeyan Naeini</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Calderon</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Dutt</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Liljeberg</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Salanterä</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Nelson</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Rahmani</surname>
              <given-names>AM</given-names>
            </name>
          </person-group>
          <article-title>Pain assessment tool with electrodermal activity for postoperative patients: method validation study</article-title>
          <source>JMIR Mhealth Uhealth</source>
          <year>2021</year>
          <volume>9</volume>
          <issue>5</issue>
          <fpage>e25258</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mhealth.jmir.org/2021/5/e25258/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/25258</pub-id>
          <pub-id pub-id-type="medline">33949957</pub-id>
          <pub-id pub-id-type="pii">v9i5e25258</pub-id>
          <pub-id pub-id-type="pmcid">PMC8135033</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Greco</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Valenza</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Lanata</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Scilingo</surname>
              <given-names>EP</given-names>
            </name>
            <name name-style="western">
              <surname>Citi</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>cvxEDA: a convex optimization approach to electrodermal activity processing</article-title>
          <source>IEEE Trans Biomed Eng</source>
          <year>2016</year>
          <volume>63</volume>
          <issue>4</issue>
          <fpage>797</fpage>
          <lpage>804</lpage>
          <pub-id pub-id-type="doi">10.1109/tbme.2015.2474131</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Aqajari</surname>
              <given-names>SAH</given-names>
            </name>
            <name name-style="western">
              <surname>Kasaeyan Naeini</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Rahmani</surname>
              <given-names>AM</given-names>
            </name>
          </person-group>
          <article-title>Objective pain assessment using wrist-based PPG signals: a respiratory rate based method</article-title>
          <source>Annu Int Conf IEEE Eng Med Biol Soc</source>
          <year>2021</year>
          <volume>2021</volume>
          <fpage>1164</fpage>
          <lpage>1167</lpage>
          <pub-id pub-id-type="doi">10.1109/EMBC46164.2021.9630002</pub-id>
          <pub-id pub-id-type="medline">34891494</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Madhav</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Ram</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Krishna</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Komalla</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Reddy</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Estimation of respiration rate from ECG, BP and PPG signals using empirical mode decomposition</article-title>
          <year>2011</year>
          <conf-name>Proceedings of the IEEE International Instrumentation and Measurement Technology Conference</conf-name>
          <conf-date>May 10-12, 2011</conf-date>
          <conf-loc>Hangzhou, China</conf-loc>
          <pub-id pub-id-type="doi">10.1109/imtc.2011.5944249</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schmidhuber</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Deep learning in neural networks: an overview</article-title>
          <source>Neural Netw</source>
          <year>2015</year>
          <volume>61</volume>
          <fpage>85</fpage>
          <lpage>117</lpage>
          <pub-id pub-id-type="doi">10.1016/j.neunet.2014.09.003</pub-id>
          <pub-id pub-id-type="medline">25462637</pub-id>
          <pub-id pub-id-type="pii">S0893-6080(14)00213-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Le</surname>
              <given-names>QV</given-names>
            </name>
          </person-group>
          <article-title>A tutorial on deep learning part 2: Autoencoders, convolutional neural networks and recurrent neural networks</article-title>
          <source>Google Brain</source>
          <year>2015</year>
          <fpage>1</fpage>
          <lpage>20</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://cs.stanford.edu/~quocle/tutorial2.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chawla</surname>
              <given-names>NV</given-names>
            </name>
            <name name-style="western">
              <surname>Bowyer</surname>
              <given-names>KW</given-names>
            </name>
            <name name-style="western">
              <surname>Hall</surname>
              <given-names>LO</given-names>
            </name>
            <name name-style="western">
              <surname>Kegelmeyer</surname>
              <given-names>WP</given-names>
            </name>
          </person-group>
          <article-title>SMOTE: Synthetic Minority Over-sampling Technique</article-title>
          <source>J Artif Intell Res</source>
          <year>2002</year>
          <month>06</month>
          <day>01</day>
          <volume>16</volume>
          <issue>1</issue>
          <fpage>321</fpage>
          <lpage>357</lpage>
          <pub-id pub-id-type="doi">10.1613/jair.953</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lemaître</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Nogueira</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Aridas</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Imbalanced-learn: a python toolbox to tackle the curse of imbalanced datasets in machine learning</article-title>
          <source>J Mach Learn Res</source>
          <year>2017</year>
          <volume>18</volume>
          <issue>1</issue>
          <fpage>559</fpage>
          <lpage>563</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://jmlr.org/papers/v18/16-365.html"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ratner</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Bach</surname>
              <given-names>SH</given-names>
            </name>
            <name name-style="western">
              <surname>Ehrenberg</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Fries</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ré</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Snorkel: rapid training data creation with weak supervision</article-title>
          <source>Proceedings VLDB Endowment</source>
          <year>2017</year>
          <volume>11</volume>
          <issue>3</issue>
          <fpage>269</fpage>
          <lpage>282</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/29770249"/>
          </comment>
          <pub-id pub-id-type="doi">10.14778/3157794.3157797</pub-id>
          <pub-id pub-id-type="medline">29770249</pub-id>
          <pub-id pub-id-type="pmcid">PMC5951191</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kohavi</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>A study of cross-validation and bootstrap for accuracy estimation and model selection</article-title>
          <source>Proc. 14th IJCAI</source>
          <year>1995</year>
          <fpage>1137</fpage>
          <lpage>1143</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.ijcai.org/Proceedings/95-2/Papers/016.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gunes</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Piccardi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Affect recognition from face and body: early fusion vs. late fusion</article-title>
          <year>2005</year>
          <conf-name>Proceedings of the IEEE International Conference on Systems, Man and Cybernetics</conf-name>
          <conf-date>October 12, 2005</conf-date>
          <conf-loc>Waikoloa, HI, USA</conf-loc>
          <fpage>3437</fpage>
          <lpage>3443</lpage>
          <pub-id pub-id-type="doi">10.1109/ICSMC.2005.1571679</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lopez-Martinez</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Picard</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Multi-task neural networks for personalized pain recognition from physiological signals</article-title>
          <year>2017</year>
          <conf-name>Proceedings of the Seventh International Conference on Affective Computing and Intelligent Interaction Workshops and Demos (ACIIW)</conf-name>
          <conf-date>October 23-26, 2017</conf-date>
          <conf-loc>San Antonio, TX</conf-loc>
          <fpage>181</fpage>
          <lpage>184</lpage>
          <pub-id pub-id-type="doi">10.48550/arXiv.1708.08755</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Feng</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Hybrid RNN-ANN based deep physiological network for pain recognition</article-title>
          <year>2020</year>
          <conf-name>Proceedings of the 42nd Annual International Conference of the IEEE Engineering in Medicine &#38; Biology Society (EMBC)</conf-name>
          <conf-date>July 20-24, 2020</conf-date>
          <conf-loc>Montreal, QC</conf-loc>
          <fpage>5584</fpage>
          <lpage>5587</lpage>
          <pub-id pub-id-type="doi">10.1109/EMBC44109.2020.9175247</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Subramaniam</surname>
              <given-names>SD</given-names>
            </name>
            <name name-style="western">
              <surname>Dass</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Automated nociceptive pain assessment using physiological signals and a hybrid deep learning network</article-title>
          <source>IEEE Sensors J</source>
          <year>2021</year>
          <volume>21</volume>
          <issue>3</issue>
          <fpage>3335</fpage>
          <lpage>3343</lpage>
          <pub-id pub-id-type="doi">10.1109/jsen.2020.3023656</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ross</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>SZ</given-names>
            </name>
          </person-group>
          <source>Fusion, Feature-Level</source>
          <year>2009</year>
          <publisher-loc>Boston, MA</publisher-loc>
          <publisher-name>Springer</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Naeini</surname>
              <given-names>EK</given-names>
            </name>
            <name name-style="western">
              <surname>Shahhosseini</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Kanduri</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Liljeberg</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Rahmani</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Dutt</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>AMSER: adaptive multimodal sensing for energy efficient and resilient eHealth systems</article-title>
          <year>2022</year>
          <conf-name>Proceedings of the Design, Automation &#38; Test in Europe Conference &#38; Exhibition (DATE)</conf-name>
          <conf-date>March 14-23, 2022</conf-date>
          <conf-loc>Antwerp, Belgium</conf-loc>
          <fpage>1455</fpage>
          <lpage>1460</lpage>
          <pub-id pub-id-type="doi">10.48550/arXiv.2112.08176</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Taylor</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Jaques</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Nosakhare</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Sano</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Picard</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Personalized multitask learning for predicting tomorrow's mood, stress, and health</article-title>
          <source>IEEE Trans Affect Comput</source>
          <year>2020</year>
          <volume>11</volume>
          <issue>2</issue>
          <fpage>200</fpage>
          <lpage>213</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/32489521"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/TAFFC.2017.2784832</pub-id>
          <pub-id pub-id-type="medline">32489521</pub-id>
          <pub-id pub-id-type="pmcid">PMC7266106</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
