<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JFR</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Form Res</journal-id>
      <journal-title>JMIR Formative Research</journal-title>
      <issn pub-type="epub">2561-326X</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v8i1e55342</article-id>
      <article-id pub-id-type="pmid">38959501</article-id>
      <article-id pub-id-type="doi">10.2196/55342</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>A Deep Learning–Based Rotten Food Recognition App for Older Adults: Development and Usability Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Mavragani</surname>
            <given-names>Amaryllis</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Li</surname>
            <given-names>Zhongqiang</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Hassan</surname>
            <given-names>Ahmed</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author">
          <name name-style="western">
            <surname>Chun</surname>
            <given-names>Minki</given-names>
          </name>
          <degrees>MS</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0241-9329</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Yu</surname>
            <given-names>Ha-Jin</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-3657-0665</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Jung</surname>
            <given-names>Hyunggu</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Department of Computer Science and Engineering</institution>
            <institution>University of Seoul</institution>
            <addr-line>Information and Technology Building</addr-line>
            <addr-line>163 Seoulsiripdae-ro, Dongdaemun-gu</addr-line>
            <addr-line>Seoul, 02504</addr-line>
            <country>Republic of Korea</country>
            <phone>82 2 6490 2455</phone>
            <email>hjung@uos.ac.kr</email>
          </address>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2967-4370</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Computer Science and Engineering</institution>
        <institution>University of Seoul</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Department of Artificial Intelligence</institution>
        <institution>University of Seoul</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Hyunggu Jung <email>hjung@uos.ac.kr</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2024</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>3</day>
        <month>7</month>
        <year>2024</year>
      </pub-date>
      <volume>8</volume>
      <elocation-id>e55342</elocation-id>
      <history>
        <date date-type="received">
          <day>12</day>
          <month>3</month>
          <year>2024</year>
        </date>
        <date date-type="rev-request">
          <day>3</day>
          <month>4</month>
          <year>2024</year>
        </date>
        <date date-type="rev-recd">
          <day>24</day>
          <month>4</month>
          <year>2024</year>
        </date>
        <date date-type="accepted">
          <day>30</day>
          <month>5</month>
          <year>2024</year>
        </date>
      </history>
      <copyright-statement>©Minki Chun, Ha-Jin Yu, Hyunggu Jung. Originally published in JMIR Formative Research (https://formative.jmir.org), 03.07.2024.</copyright-statement>
      <copyright-year>2024</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Formative Research, is properly cited. The complete bibliographic information, a link to the original publication on https://formative.jmir.org, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://formative.jmir.org/2024/1/e55342" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Older adults are at greater risk of eating rotten fruits and of getting food poisoning because cognitive function declines as they age, making it difficult to distinguish rotten fruits. To address this problem, researchers have developed and evaluated various tools to detect rotten food items in various ways. Nevertheless, little is known about how to create an app to detect rotten food items to support older adults at risk of health problems from eating rotten food items.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This study aimed to (1) create a smartphone app that enables older adults to take a picture of food items with a camera and classifies the fruit as rotten or not rotten for older adults and (2) evaluate the usability of the app and the perceptions of older adults about the app.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>We developed a smartphone app that supports older adults in determining whether the 3 fruits selected for this study (apple, banana, and orange) were fresh enough to eat. We used several residual deep networks to check whether the fruit photos collected were of fresh fruit. We recruited healthy older adults aged over 65 years (n=15, 57.7%, males and n=11, 42.3%, females) as participants. We evaluated the usability of the app and the participants’ perceptions about the app through surveys and interviews. We analyzed the survey responses, including an after-scenario questionnaire, as evaluation indicators of the usability of the app and collected qualitative data from the interviewees for in-depth analysis of the survey responses.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The participants were satisfied with using an app to determine whether a fruit is fresh by taking a picture of the fruit but were reluctant to use the paid version of the app. The survey results revealed that the participants tended to use the app efficiently to take pictures of fruits and determine their freshness. The qualitative data analysis on app usability and participants’ perceptions about the app revealed that they found the app simple and easy to use, they had no difficulty taking pictures, and they found the app interface visually satisfactory.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>This study suggests the possibility of developing an app that supports older adults in identifying rotten food items effectively and efficiently. Future work to make the app distinguish the freshness of various food items other than the 3 fruits selected still remains.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>digital health</kwd>
        <kwd>mobile health</kwd>
        <kwd>mHealth</kwd>
        <kwd>app</kwd>
        <kwd>apps</kwd>
        <kwd>application</kwd>
        <kwd>applications</kwd>
        <kwd>smartphone</kwd>
        <kwd>smartphones</kwd>
        <kwd>classification</kwd>
        <kwd>digital sensor</kwd>
        <kwd>deep learning</kwd>
        <kwd>artificial intelligence</kwd>
        <kwd>machine learning</kwd>
        <kwd>food</kwd>
        <kwd>foods</kwd>
        <kwd>fruit</kwd>
        <kwd>fruits</kwd>
        <kwd>experience</kwd>
        <kwd>experiences</kwd>
        <kwd>attitude</kwd>
        <kwd>attitudes</kwd>
        <kwd>opinion</kwd>
        <kwd>opinions</kwd>
        <kwd>perception</kwd>
        <kwd>perceptions</kwd>
        <kwd>perspective</kwd>
        <kwd>perspectives</kwd>
        <kwd>acceptance</kwd>
        <kwd>adoption</kwd>
        <kwd>usability</kwd>
        <kwd>gerontology</kwd>
        <kwd>geriatric</kwd>
        <kwd>geriatrics</kwd>
        <kwd>older adult</kwd>
        <kwd>older adults</kwd>
        <kwd>elder</kwd>
        <kwd>elderly</kwd>
        <kwd>older person</kwd>
        <kwd>older people</kwd>
        <kwd>ageing</kwd>
        <kwd>aging</kwd>
        <kwd>aged</kwd>
        <kwd>camera</kwd>
        <kwd>image</kwd>
        <kwd>imaging</kwd>
        <kwd>photo</kwd>
        <kwd>photos</kwd>
        <kwd>photograph</kwd>
        <kwd>photographs</kwd>
        <kwd>recognition</kwd>
        <kwd>picture</kwd>
        <kwd>pictures</kwd>
        <kwd>sensor</kwd>
        <kwd>sensors</kwd>
        <kwd>develop</kwd>
        <kwd>development</kwd>
        <kwd>design</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Background</title>
        <p>Older adults over the age of 65 years need a system that supports distinguishing rotten food items because older adults are exposed to the danger of eating rotten food and suffer from health problems [<xref ref-type="bibr" rid="ref1">1</xref>]. Compared to younger age groups, older adults have a lower ability to recognize whether food items are rotten [<xref ref-type="bibr" rid="ref2">2</xref>], and as they get older, their cognitive function decreases [<xref ref-type="bibr" rid="ref3">3</xref>], which causes a proportionate overall decrease in visual, olfactory, and gustatory functions [<xref ref-type="bibr" rid="ref4">4</xref>].</p>
        <p>Prior studies have found that decreased olfactory function increases the chances of eating rotten foods. According to a study by the University of Pennsylvania Medical School, olfactory disorders affect the quality of life, appetite, and weight [<xref ref-type="bibr" rid="ref5">5</xref>]. Similarly, the results of the National Health and Nutrition Examination Survey in the United States found that people over the age of 70 years have difficulty recognizing dangerous odors, such as those of smoke and gas [<xref ref-type="bibr" rid="ref6">6</xref>]. As such, older adults are in danger of being exposed to food poisoning if they ingest rotten food items due to the difficulty detecting whether the items have gone bad. Food poisoning is caused by the ingestion of toxic substances, as well as bacteria contained in rotten food items [<xref ref-type="bibr" rid="ref7">7</xref>]. To address this danger, researchers have developed and evaluated tools to detect rotten food items [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>] for unspecified users or specific users, such as people with olfactory impairment. Prior studies have also used chemical sensors and kits and, in some cases, cameras to detect rotten food items and measure freshness [<xref ref-type="bibr" rid="ref10">10</xref>].</p>
      </sec>
      <sec>
        <title>Rotten Food Item–Detecting Tools Targeted at Unspecified Users</title>
        <p>Prior studies have proposed and evaluated tools to measure food freshness for unspecified users [<xref ref-type="bibr" rid="ref8">8</xref>-<xref ref-type="bibr" rid="ref21">21</xref>]. Researchers have used chemical sensors or kits to detect the spoilage of specific food items such as pork and chicken [<xref ref-type="bibr" rid="ref11">11</xref>-<xref ref-type="bibr" rid="ref14">14</xref>] or unspecified food items [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref16">16</xref>], such as ham [<xref ref-type="bibr" rid="ref11">11</xref>], fruits (eg, banana, grape) [<xref ref-type="bibr" rid="ref12">12</xref>], and pork [<xref ref-type="bibr" rid="ref13">13</xref>]. Specifically, to detect spoilage of ham produced in Spain, Choi et al [<xref ref-type="bibr" rid="ref11">11</xref>] proposed a tool that determines whether ham is rotten using a sensor that responds to sulfur-containing compounds in the gas generated during ham decay. In addition, Caya et al [<xref ref-type="bibr" rid="ref12">12</xref>] proposed a tool with an electronic nose to detect gas generated during the decay of bananas, carrots, and grapes by using k-nearest neighbors and principal component analysis of the collected data. Similarly, Tian et al [<xref ref-type="bibr" rid="ref13">13</xref>] proposed a tool that detects spoilage of pork by determining the number of aerobic bacteria in pork through a sensor and principal component analysis. Meanwhile, Mikš‐Krajnik et al [<xref ref-type="bibr" rid="ref14">14</xref>] proposed a tool to detect 27 kinds of volatile organic compounds generated during the decay of chickens by selecting 3 types of indicators highly correlated with spoilage in order to detect spoilage of chickens. 
Researchers have also proposed tools with an electronic nose to detect decay using gas generated from meat, fruits, and vegetables [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref16">16</xref>]. Meanwhile, Janagama et al [<xref ref-type="bibr" rid="ref15">15</xref>] used an organism detection kit called a dipstick to detect wine spoilage.</p>
        <p>Researchers have also proposed various methods (eg, convolutional neural network [CNN], Fourier transform infrared spectroscopy [FTIR]) to detect the spoilage of specific food items using a camera [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref17">17</xref>-<xref ref-type="bibr" rid="ref21">21</xref>]. Perez-Daniel et al [<xref ref-type="bibr" rid="ref17">17</xref>] used a camera to detect the spoilage of unspecified food items. They proposed a tool that collects images of both normal and rotten food items through a neural network using RetinaNet and compared them to detect the food spoilage. Other researchers have proposed tools to detect the spoilage of certain food items, such as fruits [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref19">19</xref>], vegetables [<xref ref-type="bibr" rid="ref9">9</xref>], beef [<xref ref-type="bibr" rid="ref20">20</xref>], and rice [<xref ref-type="bibr" rid="ref21">21</xref>]. For example, Karakaya et al [<xref ref-type="bibr" rid="ref8">8</xref>] obtained a co-occurrence matrix from a grayscale histogram of an image to detect decay in apples, bananas, and oranges and extracted features from the obtained matrix using the bag-of-feature method. The extracted features were classified into normal food items and rotten food items using a CNN and a support vector machine (SVM). In addition, researchers have proposed tools to detect decay in citrus fruits [<xref ref-type="bibr" rid="ref19">19</xref>] and other fruits [<xref ref-type="bibr" rid="ref18">18</xref>] using visual features. Jagtap et al [<xref ref-type="bibr" rid="ref9">9</xref>] proposed a tool to take pictures of potatoes moving through a conveyor belt and detect decay in them using a neural network and features extracted from the pictures. 
Meanwhile, Ellis et al [<xref ref-type="bibr" rid="ref20">20</xref>] used FTIR and machine learning to detect food spoilage through chemical changes in beef. Additionally, Batugal et al [<xref ref-type="bibr" rid="ref21">21</xref>] proposed a tool to detect decay in Philippine rice using both a chemical sensor and a camera; this tool uses machine learning to classify data collected into spoiled rice and normal rice [<xref ref-type="bibr" rid="ref21">21</xref>].</p>
      </sec>
      <sec>
        <title>Rotten Food Item–Detecting Tools Targeted at Specific Users</title>
        <p>Prior studies have proposed and evaluated tools to measure food freshness for specific users, such as people who manage the freshness of meats [<xref ref-type="bibr" rid="ref22">22</xref>-<xref ref-type="bibr" rid="ref26">26</xref>]. These tools detect food spoilage, even in unspecified food items [<xref ref-type="bibr" rid="ref22">22</xref>], using chemical sensors or kits [<xref ref-type="bibr" rid="ref23">23</xref>-<xref ref-type="bibr" rid="ref25">25</xref>]. Specifically, researchers have proposed and evaluated tools to determine the spoilage of tomato dishes [<xref ref-type="bibr" rid="ref23">23</xref>], fish and beef [<xref ref-type="bibr" rid="ref24">24</xref>], and milk [<xref ref-type="bibr" rid="ref25">25</xref>] using a gas sensor. Researchers proposed and evaluated a tool to detect spoilage of tomato dishes in the Philippines for people with an olfactory impairment, using an electronic nose equipped with a methane/CH<sub>4</sub> quality (MQ) gas sensor and a temperature and humidity sensor to reduce the spoilage of tomato dishes [<xref ref-type="bibr" rid="ref23">23</xref>]. Similarly, a tool for identifying the spoilage of fish and beef using an electronic nose, an artificial neural network, an SVM, and k-nearest neighbors was proposed for food freshness inspection by butchers [<xref ref-type="bibr" rid="ref24">24</xref>]. Another study proposed a tool for identifying spoiled milk using an electronic nose and fuzzy c-means clustering to prevent older adults suffering from olfactory disorders and dementia from drinking spoiled milk [<xref ref-type="bibr" rid="ref25">25</xref>]. Musa et al [<xref ref-type="bibr" rid="ref22">22</xref>] proposed a film for packaging material developers that changes color when it encounters rotten food items by sensing the pH with corn starch-glycerol and anthocyanin.</p>
        <p>A previous study proposed a tool for certain users to use a chemical sensor and a camera to determine food spoilage. Kodogiannis et al [<xref ref-type="bibr" rid="ref26">26</xref>] proposed a tool that detects microorganisms in meat using FTIR and an advanced clustering-based neuro-fuzzy identification model, determining the degree of meat spoilage.</p>
      </sec>
      <sec>
        <title>Limitations of Prior Studies</title>
        <p>Nevertheless, little is known about studies that have developed and evaluated apps to help determine the freshness of fruits for the basic diet of physically healthy older adults. Therefore, we developed and evaluated a smartphone app for healthy older adults that determines whether apples, bananas, and oranges are rotten. Apples, bananas, and oranges are among the most consumed traditional fruits in South Korea and the United States [<xref ref-type="bibr" rid="ref27">27</xref>]. In addition, we could easily collect photos of rotten apples, bananas, and oranges, so we selected them as the target fruits of our app [<xref ref-type="bibr" rid="ref28">28</xref>]. <xref ref-type="boxed-text" rid="box1">Textbox 1</xref> shows the research questions of our study.</p>
        <p>By answering our research questions, we will contribute to older adults and the community of researchers. First, we proposed a smartphone app that supports older adults in avoiding consuming rotten fruits. Second, we revealed their perceptions about the app and showed them how to evaluate the app, which uses an artificial intelligence (AI) model. Third, we contributed to the related research community by revealing which of the 7 pretrained backbone networks we used to classify rotten fruits showed the best performance. To answer the research questions, we reviewed the relevant literature and developed and evaluated the performance of the app. To evaluate the app, we used an after-scenario questionnaire [<xref ref-type="bibr" rid="ref29">29</xref>] and conducted semistructured interviews [<xref ref-type="bibr" rid="ref30">30</xref>] with older adults.</p>
        <boxed-text id="box1" position="float">
          <title>Research questions.</title>
          <list list-type="bullet">
            <list-item>
              <p>Research question 1: What studies have previously evaluated and developed an app for healthy older adults to take pictures of food items with a smartphone camera to determine whether the items are rotten?</p>
            </list-item>
            <list-item>
              <p>Research question 2: Is it possible to make an app that classifies apples, bananas, and oranges with only their pictures taken with a smartphone camera?</p>
              <list>
                <list-item>
                  <p>Research question 2.1: How can we gather pictures that are needed to create a function for classifying apples, bananas, and oranges?</p>
                </list-item>
                <list-item>
                  <p>Research question 2.2: How can we create a function to determine whether apples, bananas, and oranges are rotten?</p>
                </list-item>
                <list-item>
                  <p>Research question 2.3: How can we create an app that can use the aforementioned functions?</p>
                </list-item>
              </list>
            </list-item>
            <list-item>
              <p>Research question 3: Is there a significant performance difference between functions created by various methods of classifying apples, bananas, and oranges?</p>
            </list-item>
            <list-item>
              <p>Research question 4: Is the app that determines whether an apple, a banana, or an orange is rotten only by using pictures taken with a smartphone camera easy to use?</p>
              <list>
                <list-item>
                  <p>Research question 4.1: Why are older adults satisfied with the process of using the app?</p>
                </list-item>
                <list-item>
                  <p>Research question 4.2: Why are older adults dissatisfied with the process of using the app?</p>
                </list-item>
                <list-item>
                  <p>Research question 4.3: Why are older adults satisfied with the time it takes to complete the task?</p>
                </list-item>
                <list-item>
                  <p>Research question 4.4: Why are older adults dissatisfied with the time taken to complete the task?</p>
                </list-item>
                <list-item>
                  <p>Research question 4.5: What makes older adults satisfied using the app?</p>
                </list-item>
                <list-item>
                  <p>Research question 4.6: What makes older adults dissatisfied with using the app?</p>
                </list-item>
                <list-item>
                  <p>Research question 4.7: What do older adults want from using the app?</p>
                </list-item>
              </list>
            </list-item>
            <list-item>
              <p>Research question 5: What are the perceptions about the app that determines whether an apple, a banana, or an orange is rotten?</p>
              <list>
                <list-item>
                  <p>Research question 5.1: Who are the potential users of the app?</p>
                </list-item>
                <list-item>
                  <p>Research question 5.2: Why did older adults suggest them as potential users?</p>
                </list-item>
                <list-item>
                  <p>Research question 5.3: Why can older adults trust the app?</p>
                </list-item>
                <list-item>
                  <p>Research question 5.4: Why can older adults not trust the app?</p>
                </list-item>
                <list-item>
                  <p>Research question 5.5: What features would older adults like to add to the app?</p>
                </list-item>
                <list-item>
                  <p>Research question 5.6: Why do older adults use the app even if it is a paid app?</p>
                </list-item>
              </list>
            </list-item>
          </list>
        </boxed-text>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Study Design</title>
        <p>This study aimed to develop and evaluate a smartphone app that enables older adults to take pictures of food items with a smartphone camera and to determine whether the items are rotten. We first reviewed prior studies that have developed and evaluated similar spoilage detection tools. After reviewing the relevant literature, we trained a model that classifies the freshness of 3 fruits (ie, apples, bananas, and oranges) and evaluated the model’s performance. Next, we developed a smartphone app that uses the trained model to take images of fruits and classify them into images of fresh and rotten fruits. We recruited 26 older adult participants to evaluate the usability of the app and to determine their perceptions about the app.</p>
      </sec>
      <sec>
        <title>Literature Review</title>
        <p>The purpose of the literature review was to discover the limitations of prior studies that have developed and evaluated tools that classify food items into rotten and fresh items. To collect prior studies that have reported any developed apps determining food freshness by taking pictures of food items, we used a combination of keywords (eg, “rotten,” “food,” “collect,” and “app”) in Scopus, IEEE, the ACM Digital Library, PubMed, and EBSCO. We also used similar meanings for each keyword (eg, spoiled, corrupt, damaged, ruined, and expired for similar meanings of rotten) and linked them with “OR” and “AND” as query sentences (see <xref ref-type="boxed-text" rid="box2">Textbox 2</xref>). We exported each paper’s metadata, such as the title, year of publication, author, DOI, publisher, and keywords, and stored it to Google Spreadsheets. We used the PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses) flow diagram to manage duplicate papers between databases and to screen review papers (see <xref rid="figure1" ref-type="fig">Figure 1</xref>) [<xref ref-type="bibr" rid="ref31">31</xref>]. We excluded papers not meeting the eligibility criteria and found 19 eligible papers. After reviewing each paper to examine the research questions and objectives of prior studies, we found that no prior studies have reported how to support physically healthy older adults in determining the freshness of their diet using readily available equipment, such as a smartphone.</p>
        <boxed-text id="box2" position="float">
          <title>Search query used in Scopus.</title>
          <p>TITLE-ABS-KEY(((“spoiled” OR “rotten” OR “corrupt” OR “damaged” OR “ruined” OR “expired”) AND (“food” OR “cooking” OR “kitchen” OR “food” OR “food” OR “meals” OR “kitchens” OR “kitchens” OR “food” OR “meals”) AND (“pick” OR “pick” OR “take” OR “take” OR “detects” OR “detects” OR “identifies” OR “identifies” OR “recognizes” OR “recognizes”) AND (“app” OR “application” OR “system” OR “platform” OR “apps” OR “applications” OR “systems” OR “platforms”))).</p>
        </boxed-text>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>PRISMA flow diagram for reviewing prior studies. After deduplicating the papers in each database, we screened review papers and found 19 that met the eligibility criteria. PRISMA: Preferred Reporting Items for Systematic Reviews and Meta-Analyses.</p>
          </caption>
          <graphic xlink:href="formative_v8i1e55342_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>App Development</title>
        <p>The objective of this study was to develop an app that determines whether an apple, a banana, or an orange is rotten just by taking a picture of the fruit using a smartphone camera. App development and evaluation procedures consisted of 4 steps: (1) collecting images, (2) training classification models, (3) developing an app, and (4) evaluating the models. First, we collected pictures required for training the food classification model. We collected pictures of fresh and rotten apples, bananas, and oranges from Kaggle, a platform that provides preprocessed data sets to data scientists: 2088 pictures of fresh apples, 1962 pictures of fresh bananas, 1854 pictures of fresh oranges, 2943 pictures of rotten apples, 2754 pictures of rotten bananas, and 1998 pictures of rotten oranges (see <xref rid="figure2" ref-type="fig">Figure 2</xref>). We uploaded and stored the collected pictures on the server for classification model training.</p>
        <p>Second, we trained food picture classification models. To use only the features of the fruit in the picture for training, we segmented the pixels of the fruit from the background using Otsu’s [<xref ref-type="bibr" rid="ref32">32</xref>] method. We stored the segmented pictures with the same size as the original pictures. When training the models, we used several pretrained residual networks (ResNets; eg, ResNet-50, ResNet-50V2, ResNet-101, ResNet-101V2, ResNet-152, ResNet-152V2, Inception-ResNet-V2) for feature extraction, reducing the time for model training and improving model performance [<xref ref-type="bibr" rid="ref33">33</xref>]. Among the parameters used in the single fully connected layer, we set the activation function among sigmoid, rectified linear unit (ReLU), and tanh. We set the dropout rate to 0, 0.1, 0.2, 0.3, 0.4, or 0.5 and the number of neurons to 64, 128, 256, 512, or 1025. We randomly selected 100 combinations of the activation functions, the dropout rate, and the number of neurons. Among the combinations, the layer combination that showed the best performance consisted of a dense (neurons=128, activation=sigmoid) layer, a dropout (drop rate=0.2) layer, a dense (neurons=128, activation = sigmoid) layer, and a dropout (drop rate=0) layer.</p>
        <p>Third, we developed a smartphone app that uses a classification model that classifies fruits in pictures into fresh or rotten fruit. We used Android Studio [<xref ref-type="bibr" rid="ref34">34</xref>], the official integrated development environment for Android apps, because the proportion of smartphone users in their sixties and older is steadily increasing, and the older they are, the more they use Android rather than iOS. We set the app’s minimum application programming interface (API) level to 29, the minimum level that supports app download from Google Play and that can be operated on most smartphones. We developed 3 functions: a function to take a picture using a smartphone camera, a function to input pictures into the classification function, and a function to show the classification result on the app screen. We created a low-fidelity prototype [<xref ref-type="bibr" rid="ref35">35</xref>] to determine user tasks and 3 features of the app (see <xref rid="figure3" ref-type="fig">Figure 3</xref>). We added a camera function and set permissions (eg, camera, internal storage) for the function that would enable users to take pictures. The pictures taken are resized to 256 × 256 pixels to fit the input size of the trained classification model. The resized pictures are input to the converted classification model with the .h5 extension. Then, the app takes the output array from the classification model that consists of the probability value and displays one of the following texts: “The apple is rotten,” “The apple is fresh,” “The banana is rotten,” “The banana is fresh,” “The orange is rotten,” and “The orange is fresh” (see <xref rid="figure4" ref-type="fig">Figures 4</xref> and <xref rid="figure5" ref-type="fig">5</xref>). To perform these functions, we added several buttons to allow users to interact with the app.</p>
        <p>Lastly, we evaluated the performance of our trained models (eg, area under the curve [AUC], error rate). The model evaluation aimed to determine the best-performing model among the models developed using multiple backbone networks in the model training stage. To fix the metric to be used for model evaluation, we found the metric used to evaluate the classification model in each of the papers in the related area. A prior study used the <italic>F</italic><sub>1</sub>-score, balanced accuracy, the error rate, and the AUC to evaluate the classification model [<xref ref-type="bibr" rid="ref23">23</xref>]. After extracting the above metrics through 10-fold cross-validation, we assessed whether there was a significant difference between each model’s performance by using the Kruskal-Wallis test [<xref ref-type="bibr" rid="ref36">36</xref>], a method that can be used when n≤30 for ≥3 classes. We performed this test to check whether using different backbone networks makes significant differences in model performance.</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>Pictures of apples, bananas, and oranges collected from Kaggle.</p>
          </caption>
          <graphic xlink:href="formative_v8i1e55342_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Low-fidelity prototype of the app, intentionally sketched by hand to emphasize the conceptual stage of design. Each box represents the shape of the smartphone running the app. Arrows depict screen transitions triggered by user interactions with specific elements on the screen. Low-fidelity prototypes are basic visual representations that do not incorporate high details or functionalities but are essential for rapid iterations and facilitate early discussions among researchers on design concepts, making hand-drawn sketches an ideal format for such type of prototype.</p>
          </caption>
          <graphic xlink:href="formative_v8i1e55342_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>The 4 main features of the app. When a user takes a picture of a fruit, the picture is classified by the classification model and returned to the user.</p>
          </caption>
          <graphic xlink:href="formative_v8i1e55342_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Performance Evaluation</title>
        <p>We evaluated the app’s performance to answer research questions 4 and 5. We performed a performance evaluation of the classification model used in the app, a quantitative evaluation using an after-scenario questionnaire [<xref ref-type="bibr" rid="ref29">29</xref>], and a qualitative evaluation through semistructured interviews with the participants.</p>
        <sec>
          <title>Recruitment</title>
          <p>We recruited older adults, who are the target users of the app, as participants. We set eligibility criteria as follows: (1) 65 years of age or older, (2) those who have used a smartphone with a rear camera for at least 1 year, (3) those who have taken pictures with a smartphone at least once in the past 3 months, (4) those who could understand the contents of the questionnaire, (5) those who have lived in Korea for more than 5 years and can communicate in Korean, and (6) those who have not participated in this study before. However, we excluded participants unwilling to be research subjects or those with any chronic diseases [<xref ref-type="bibr" rid="ref37">37</xref>]. We recruited participants using 3 methods: (1) recruiting through acquaintances, (2) recruiting through an institution used by older adults, and (3) recruiting through an online community used by older adults. When recruiting participants through acquaintances, we informed people [<xref ref-type="bibr" rid="ref38">38</xref>] about the purpose of the study, the expected time required for the experiment, the eligibility of participants, and the reward. When recruiting through institutions, we collected the name, location, phone number, and email of the senior welfare center in Seoul and sent an email requesting to promote the experiment and share a promotional poster. When recruiting participants through an online community, we posted promotional posters by joining social network services, such as Open Kakao Talk, Naver Band, and Naver Cafe, which are thought to be used by older adults.</p>
        </sec>
        <sec>
          <title>Study Procedure</title>
          <p>We evaluated the usability of the app through an after-scenario questionnaire and semistructured interviews. We conducted an experiment by visiting each participant’s home. The researcher informed participants about the (1) purpose of the experiment, (2) purpose of using the app, (3) expected time required for the experiment, (4) function of the app, and (5) researcher (a brief introduction). After the participants agreed to enroll in the study, we instructed them about 3 tasks using the app: (1) run the app and review the screen, (2) take a picture of a fruit using the app, and (3) review the text indicating whether the fruit in the picture taken is fresh. To conduct the experiment in the same environment as far as possible, we handed the smartphone with the app preinstalled to the participants. To minimize the impact of imaging angles, environments, illumination, cameras, and any other potential factors on the efficiency of classification, a standardized protocol was implemented across all experiments. For instance, each participant was asked to use the Samsung Galaxy Note 10 camera to photograph the fruits to ensure consistency in device specifications. The pictures were taken at a 45° angle under sufficiently bright light, such as in the kitchen or living room. Furthermore, by having participants align the fruits with the square gray lines shown in <xref rid="figure5" ref-type="fig">Figure 5</xref>, we ensured that the orientation and size of the fruits within the pictures did not affect the classification results. Each participant performed the given task with a fresh apple, a rotten apple, a fresh banana, a rotten banana, a fresh orange, and a rotten orange. When the participants completed their tasks, we asked them to fill out a questionnaire (see <xref ref-type="boxed-text" rid="box3">Textbox 3</xref>) and then interviewed them. 
The questionnaire consisted of questions regarding the experiences of the participants, considering the research questions, using a 5-point Likert scale ranging from 1 (strongly disagree) to 5 (strongly agree). When the interview was over, we stopped recording and paid Korean won 10,000 (approximately US $8) to each participant in cash. The questionnaire and the consent form were scanned and uploaded to Google Drive, along with the interview recording file. Finally, the interview recording file was transcribed, and we listened to the recording file again and corrected the script.</p>
          <fig id="figure5" position="float">
            <label>Figure 5</label>
            <caption>
              <p>User interface screenshot. (A) Interface where the user takes a picture: (A)-1. Button to exit the app. (A)-2. Screen that shows what the camera is focusing on. (A)-3. Button to take a picture. (B) Interface for reviewing pictures taken by the user and deciding whether to use the pictures: (B)-1. Popup that asks the user whether to use a photo. (C) Interface for displaying the results of classifying the freshness of the fruit: (C)-1. Text that indicates the type and freshness of the photographed fruit. (C)-2. Button to return to the home screen of the app so that the user can take a picture of another fruit.</p>
            </caption>
            <graphic xlink:href="formative_v8i1e55342_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <boxed-text id="box3" position="float">
            <title>After-scenario questionnaire for study participants using a 5-point Likert scale.</title>
            <p>Questions to answer the research questions (1 for strongly disagree to 5 for strongly agree):</p>
            <list list-type="order">
              <list-item>
                <p>I think it was easy to touch the app icon and review the first screen of the app.</p>
              </list-item>
              <list-item>
                <p>I feel satisfied with the time it took to touch the app icon and review the app’s first screen.</p>
              </list-item>
              <list-item>
                <p>I think it was easy to take a picture of the fruit.</p>
              </list-item>
              <list-item>
                <p>I am satisfied with the time it took me to take a picture of the fruit.</p>
              </list-item>
              <list-item>
                <p>I think it was easy to review the text indicating whether the fruit was fresh.</p>
              </list-item>
              <list-item>
                <p>I feel satisfied with the time it took to review the text indicating whether the fruit was fresh.</p>
              </list-item>
              <list-item>
                <p>I trust the text indicating whether the fruit is fresh.</p>
              </list-item>
              <list-item>
                <p>Even if the app is paid, I plan to use it.</p>
              </list-item>
            </list>
          </boxed-text>
        </sec>
      </sec>
      <sec>
        <title>Data Analysis</title>
        <p>We quantitatively and qualitatively analyzed the participants’ responses. We removed every participant’s identification information and assigned them a new one (eg, P1, P2). Next, we calculated the questionnaire responses’ mean (SD) scores for quantitative analysis. For qualitative analysis, we used open-coding methods that highlighted meaningful remarks within scripts. We highlighted statements that included important information (eg, participants’ experiences, preferences) in each script [<xref ref-type="bibr" rid="ref39">39</xref>]. For each highlighted remark, we assigned a label to summarize the remark. We printed all the labels and inductively grouped them to extract important themes that answered our research questions (see <xref rid="figure6" ref-type="fig">Figure 6</xref>) [<xref ref-type="bibr" rid="ref40">40</xref>]. For qualitative research, affinity diagramming, a technique using sticky notes, is a prevalent practice due to its inherent flexibility and tangibility, which facilitates the organization of thoughts and findings in a spatially representative way. The ability to physically manipulate and rearrange data points is a key advantage of affinity diagramming, fostering deeper thematic analysis and the identification of emergent relationships and patterns. This technique also promotes collaborative discussions among researchers by providing a clear visual representation of the data analysis process.</p>
        <fig id="figure6" position="float">
          <label>Figure 6</label>
          <caption>
            <p>Qualitative data analysis process, a vital step in numerous research methodologies. White notes represent codes written by the researcher for excerpts from interview transcripts. These codes highlight key ideas or concepts within the data. Yellow notes signify emergent themes identified through an initial coding stage. Here, similar or related codes are grouped together to synthesize broader patterns within the data. Red notes denote overarching themes or categories derived from a subsequent grouping of the yellow notes. This final stage reflects a higher level of data abstractions, moving from specific codes to more general thematic constructs.</p>
          </caption>
          <graphic xlink:href="formative_v8i1e55342_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Ethical Considerations</title>
        <p>All experimental procedures were approved by the Institutional Review Board (IRB) of the University of Seoul (IRB ID: 2021-06-001-001). Prior to the experiment, each participant provided informed consent by signing a consent form about participating in the study and recording the interview, which also included a clause allowing the use of collected data for secondary analyses without additional consent. To protect the privacy of collected data, all participant data were anonymized and stored on a secure drive, which only authorized personnel have access to. After completing the interviews, all participants received a cash compensation of South Korean won 10,000 (approximately US $8) for their participation.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Participant Demographics</title>
        <p><xref ref-type="table" rid="table1">Table 1</xref> shows the details of the 26 participants enrolled in this study.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Participant (N=26) demographics.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="150"/>
            <col width="100"/>
            <col width="110"/>
            <col width="310"/>
            <col width="330"/>
            <thead>
              <tr valign="top">
                <td>Participant ID</td>
                <td>Age (years)</td>
                <td>Gender</td>
                <td>Take pictures of food often (1: strongly disagree to 5: strongly agree)</td>
                <td>Usually check that food is fresh before eating it (1: strongly disagree to 5: strongly agree)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>P1</td>
                <td>65</td>
                <td>Female</td>
                <td>Not reported</td>
                <td>Not reported</td>
              </tr>
              <tr valign="top">
                <td>P2</td>
                <td>70</td>
                <td>Male</td>
                <td>Not reported</td>
                <td>Not reported</td>
              </tr>
              <tr valign="top">
                <td>P3</td>
                <td>66</td>
                <td>Female</td>
                <td>Not reported</td>
                <td>Not reported</td>
              </tr>
              <tr valign="top">
                <td>P4</td>
                <td>66</td>
                <td>Female</td>
                <td>Not reported</td>
                <td>Not reported</td>
              </tr>
              <tr valign="top">
                <td>P5</td>
                <td>65</td>
                <td>Male</td>
                <td>Not reported</td>
                <td>Not reported</td>
              </tr>
              <tr valign="top">
                <td>P6</td>
                <td>69</td>
                <td>Male</td>
                <td>1</td>
                <td>4</td>
              </tr>
              <tr valign="top">
                <td>P7</td>
                <td>73</td>
                <td>Female</td>
                <td>2</td>
                <td>5</td>
              </tr>
              <tr valign="top">
                <td>P8</td>
                <td>73</td>
                <td>Female</td>
                <td>2</td>
                <td>5</td>
              </tr>
              <tr valign="top">
                <td>P9</td>
                <td>65</td>
                <td>Female</td>
                <td>2</td>
                <td>5</td>
              </tr>
              <tr valign="top">
                <td>P10</td>
                <td>65</td>
                <td>Male</td>
                <td>1</td>
                <td>5</td>
              </tr>
              <tr valign="top">
                <td>P11</td>
                <td>74</td>
                <td>Male</td>
                <td>3</td>
                <td>5</td>
              </tr>
              <tr valign="top">
                <td>P12</td>
                <td>69</td>
                <td>Female</td>
                <td>1</td>
                <td>3</td>
              </tr>
              <tr valign="top">
                <td>P13</td>
                <td>69</td>
                <td>Male</td>
                <td>2</td>
                <td>4</td>
              </tr>
              <tr valign="top">
                <td>P14</td>
                <td>65</td>
                <td>Male</td>
                <td>1</td>
                <td>5</td>
              </tr>
              <tr valign="top">
                <td>P15</td>
                <td>69</td>
                <td>Male</td>
                <td>2</td>
                <td>4</td>
              </tr>
              <tr valign="top">
                <td>P16</td>
                <td>76</td>
                <td>Male</td>
                <td>5</td>
                <td>5</td>
              </tr>
              <tr valign="top">
                <td>P17</td>
                <td>69</td>
                <td>Male</td>
                <td>1</td>
                <td>5</td>
              </tr>
              <tr valign="top">
                <td>P18</td>
                <td>76</td>
                <td>Male</td>
                <td>1</td>
                <td>1</td>
              </tr>
              <tr valign="top">
                <td>P19</td>
                <td>67</td>
                <td>Female</td>
                <td>1</td>
                <td>5</td>
              </tr>
              <tr valign="top">
                <td>P20</td>
                <td>72</td>
                <td>Male</td>
                <td>1</td>
                <td>4</td>
              </tr>
              <tr valign="top">
                <td>P21</td>
                <td>68</td>
                <td>Female</td>
                <td>3</td>
                <td>5</td>
              </tr>
              <tr valign="top">
                <td>P22</td>
                <td>70</td>
                <td>Male</td>
                <td>3</td>
                <td>4</td>
              </tr>
              <tr valign="top">
                <td>P23</td>
                <td>65</td>
                <td>Male</td>
                <td>1</td>
                <td>4</td>
              </tr>
              <tr valign="top">
                <td>P24</td>
                <td>66</td>
                <td>Female</td>
                <td>3</td>
                <td>4</td>
              </tr>
              <tr valign="top">
                <td>P25</td>
                <td>68</td>
                <td>Male</td>
                <td>3</td>
                <td>4</td>
              </tr>
              <tr valign="top">
                <td>P26</td>
                <td>66</td>
                <td>Female</td>
                <td>3</td>
                <td>5</td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec>
        <title>Model Performance, Survey Results, and Interview Results</title>
        <p>The results of our study were classified into (1) performance evaluation results of the trained model during app development, (2) quantitative analysis results of responses collected through the survey, and (3) qualitative analysis results of responses collected through interviews.</p>
        <sec>
          <title>Model Performance</title>
          <p>We trained each classification model using 7 ResNets as a backbone and compared their performance using 10-fold cross-validation to review whether there was a significant difference in performance. ResNet-101 showed the highest performance, on average (see <xref ref-type="table" rid="table2">Table 2</xref>). In this study, the optimal architecture for the fully connected layers was identified as a sequence containing a dense layer with 128 neurons and sigmoid activation, a dropout layer with a drop rate of 0.2, and another dense layer with 128 neurons also using sigmoid activation. The final layer in the sequence was a dense layer without dropout. The learning rate was set to 0.001. The training process was configured with 1000 epochs, incorporating an early stopping mechanism with a patience parameter of 20. Whether the model trained using ResNet-101 and the rest of the backbone networks showed a significant performance difference was verified using the Kruskal-Wallis test [<xref ref-type="bibr" rid="ref36">36</xref>]. As a result, ResNet-101 showed a significant performance difference from ResNet-50, ResNet-50V2, ResNet-101V2, and Inception-ResNet-V2, but there was no significant difference from ResNet-152 and ResNet-152V2. Therefore, although the trained model using ResNet-101 showed the highest performance among the 7 backbone networks, there was no significant performance difference when compared with ResNet-152 and ResNet-152V2.</p>
          <table-wrap position="float" id="table2">
            <label>Table 2</label>
            <caption>
              <p>AUC<sup>a</sup>, error rate, balanced accuracy, and <italic>F</italic><sub>1</sub>-score of ResNet<sup>b</sup>-101 after training the model using each backbone network with 10-fold cross-validation.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="320"/>
              <col width="160"/>
              <col width="170"/>
              <col width="190"/>
              <col width="160"/>
              <thead>
                <tr valign="top">
                  <td>Backbone</td>
                  <td>AUC</td>
                  <td>Error rate</td>
                  <td>Balanced accuracy</td>
                  <td><italic>F</italic><sub>1</sub>-score</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>ResNet-50</td>
                  <td>0.9533</td>
                  <td>0.3589</td>
                  <td>0.7523</td>
                  <td>0.7615</td>
                </tr>
                <tr valign="top">
                  <td>ResNet-50V2</td>
                  <td>0.9456</td>
                  <td>0.3591</td>
                  <td>0.7409</td>
                  <td>0.7495</td>
                </tr>
                <tr valign="top">
                  <td>ResNet-101</td>
                  <td>0.9663<sup>c</sup></td>
                  <td>0.3216<sup>c</sup></td>
                  <td>0.7949<sup>c</sup></td>
                  <td>0.8003<sup>c</sup></td>
                </tr>
                <tr valign="top">
                  <td>ResNet-101V2</td>
                  <td>0.9467</td>
                  <td>0.3533</td>
                  <td>0.7403</td>
                  <td>0.7494</td>
                </tr>
                <tr valign="top">
                  <td>ResNet-152</td>
                  <td>0.9607</td>
                  <td>0.3475</td>
                  <td>0.7783</td>
                  <td>0.7833</td>
                </tr>
                <tr valign="top">
                  <td>ResNet-152V2</td>
                  <td>0.9606</td>
                  <td>0.3250</td>
                  <td>0.7806</td>
                  <td>0.7884</td>
                </tr>
                <tr valign="top">
                  <td>Inception-ResNet-V2</td>
                  <td>0.9210</td>
                  <td>0.4097</td>
                  <td>0.6821</td>
                  <td>0.6932</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table2fn1">
                <p><sup>a</sup>AUC: area under the curve.</p>
              </fn>
              <fn id="table2fn2">
                <p><sup>b</sup>ResNet: residual network.</p>
              </fn>
              <fn id="table2fn3">
                <p><sup>c</sup>The highest performance shown by ResNet-101.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
        </sec>
        <sec>
          <title>Survey Results</title>
          <p>We obtained the mean (SD) of the demographic information and the participants’ responses to each survey question. A total of 32 responses were obtained; however, 6 (23.1%) participants (P3-P5, P7, P10, and P11) did not meet the criteria for participation, so their responses were excluded. According to the demographic information shown in <xref ref-type="table" rid="table1">Table 1</xref>, the mean age of the 26 (100%) participants was 68.69 (SD 3.47) years, of which 15 (57.7%) participants were male and 11 (42.3%) were female. The participants generally had a positive perception of the app (see <xref ref-type="table" rid="table3">Table 3</xref>). They generally agreed that it was easy to run the app and review its first screen, and they were satisfied with the time it took to run the app and review the first screen. In addition, they thought that taking pictures of fruits is easy, and they were satisfied with the time required to take a picture. They also thought that reviewing the text indicating whether the fruit is fresh is easy, they were satisfied with the time duration from taking a picture to reviewing the text, and they trusted the result displayed on the app. However, they did not intend to use the app if it is a paid app. According to the results, the freshness of fruits can be efficiently reviewed with the app.</p>
          <table-wrap position="float" id="table3">
            <label>Table 3</label>
            <caption>
              <p>Survey results showing the perceptions of older adults about the app.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="140"/>
              <col width="720"/>
              <col width="140"/>
              <thead>
                <tr valign="top">
                  <td>Question number</td>
                  <td>Survey question (1: strongly disagree to 5: strongly agree)</td>
                  <td>Mean (SD)</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>1</td>
                  <td>Do you agree that running the app and reviewing its first screen is easy?</td>
                  <td>4.58 (0.70)</td>
                </tr>
                <tr valign="top">
                  <td>2</td>
                  <td>Do you agree that you were satisfied with the time it takes to run the app and review its first screen?</td>
                  <td>4.77 (0.51)</td>
                </tr>
                <tr valign="top">
                  <td>3</td>
                  <td>Do you agree that taking pictures of fruits is easy?</td>
                  <td>4.81 (0.49)</td>
                </tr>
                <tr valign="top">
                  <td>4</td>
                  <td>Do you agree that you were satisfied with the time required to take a picture of a fruit?</td>
                  <td>4.81 (0.40)</td>
                </tr>
                <tr valign="top">
                  <td>5</td>
                  <td>Do you agree that reviewing the text indicating whether the fruit is fresh is easy?</td>
                  <td>4.73 (0.67)</td>
                </tr>
                <tr valign="top">
                  <td>6</td>
                  <td>Do you agree that you were satisfied with the time duration from taking a picture of a fruit to reviewing the text indicating whether the fruit is fresh?</td>
                  <td>4.88 (0.33)</td>
                </tr>
                <tr valign="top">
                  <td>7</td>
                  <td>Do you agree that you trust the result displayed on the app?</td>
                  <td>4.38 (0.90)</td>
                </tr>
                <tr valign="top">
                  <td>8</td>
                  <td>Do you agree that you intend to use the app even if you have to pay for it?</td>
                  <td>2.73 (1.28)</td>
                </tr>
              </tbody>
            </table>
          </table-wrap>
        </sec>
        <sec>
          <title>Interview Results</title>
          <p>We analyzed the interview results qualitatively to answer research questions 4 and 5:</p>
          <list list-type="bullet">
            <list-item>
              <p>Research question 4.1: Why are older adults satisfied with the process of using the app?</p>
            </list-item>
            <list-item>
              <p>Research question 4.2: Why are older adults dissatisfied with the process of using the app?</p>
            </list-item>
            <list-item>
              <p>Research question 4.3: Why are older adults satisfied with the time it takes to complete the task?</p>
            </list-item>
            <list-item>
              <p>Research question 4.4: Why are older adults dissatisfied with the time taken to complete the task?</p>
            </list-item>
            <list-item>
              <p>Research question 4.5: What makes older adults satisfied using the app?</p>
            </list-item>
            <list-item>
              <p>Research question 4.6: What makes older adults dissatisfied with using the app?</p>
            </list-item>
            <list-item>
              <p>Research question 5.1: Who are the potential users of the app?</p>
            </list-item>
            <list-item>
              <p>Research question 5.2: Why did older adults suggest them as potential users?</p>
            </list-item>
            <list-item>
              <p>Research question 5.3: Why can older adults trust the app?</p>
            </list-item>
            <list-item>
              <p>Research question 5.4: Why can older adults not trust the app?</p>
            </list-item>
            <list-item>
              <p>Research question 5.5: What features would older adults like to add to the app?</p>
            </list-item>
            <list-item>
              <p>Research question 5.6: Why do older adults use the app even if it is a paid app?</p>
            </list-item>
          </list>
          <p>Since no older adults responded that they were dissatisfied with the time taken to use the app, we excluded the answer to research question 4.4 from the results.</p>
        </sec>
        <sec>
          <title>Reasons Why Older Adults Are Satisfied or Dissatisfied With the Process of Using the App</title>
          <p>Participants stated that there are several reasons why they were satisfied with the process of using the app. For example, 10 (38.5%) participants reported that the app is simple and easy to use:</p>
          <disp-quote>
            <p>I do not have to do something in several steps, I just take pictures.</p>
            <attrib>P10</attrib>
          </disp-quote>
          <p>Similarly, 7 (26.9%) participants responded that they had no difficulty taking pictures:</p>
          <disp-quote>
            <p>It was not difficult for me to take pictures using the app.</p>
            <attrib>P22</attrib>
          </disp-quote>
          <p>In addition, 6 (23.1%) participants said that it is not difficult to use the app because the description is sufficient:</p>
          <disp-quote>
            <p>The description of the app was very helpful. Without this explanation, I would have taken the wrong picture, and I do not think I would get the desired result.</p>
            <attrib>P12</attrib>
          </disp-quote>
          <p>Moreover, 4 (15.4%) participants responded that the app works fast, so they immediately saw the text indicating the freshness of the fruit on the app:</p>
          <disp-quote>
            <p>As soon as I took a picture, the text was immediately visible, so I could check whether it was fresh or not, which was nice.</p>
            <attrib>P22</attrib>
          </disp-quote>
          <p>Two participants said they were satisfied with the interface:</p>
          <disp-quote>
            <p>I think I can give it almost 90 out of 100 points for touching and reviewing the screen of the app. The screen composition is almost 90 points.</p>
            <attrib>P11</attrib>
          </disp-quote>
          <p>However, some participants still found it difficult to take pictures or check fruit freshness. For example, 2 (7.7%) participants reported that taking pictures using the app is difficult:</p>
          <disp-quote>
            <p>It would be much better if I could just take pictures, whether from close or far away. But when I tried to fit the object inside the square, my hand was shaking, which made it difficult to take the photo.</p>
            <attrib>P12</attrib>
          </disp-quote>
          <p>Two other participants reported that they could not quickly check the text indicating freshness:</p>
          <disp-quote>
            <p>The speed at which I can check whether the fruit is fresh or not will still be around 3 out of 10. I could not check it right away.</p>
            <attrib>P3</attrib>
          </disp-quote>
        </sec>
        <sec>
          <title>Reasons Why Older Adults Are Satisfied With the Time It Takes to Use the App</title>
          <p>Participants were generally satisfied with the time it took to use the app. For example, 13 (50%) participants said that it does not take long to use the app:</p>
          <disp-quote>
            <p>When I touched the app, it did not feel like it took that long, such as buffering, and the screen popped up immediately.</p>
            <attrib>P23</attrib>
          </disp-quote>
          <p>Two participants responded that they felt it is easy to use the app:</p>
          <disp-quote>
            <p>I felt satisfied because I understood it quickly.</p>
            <attrib>P7</attrib>
          </disp-quote>
          <p>One participant said that she was satisfied with using the app even though it actually takes a long time:</p>
          <disp-quote>
            <p>I liked being able to use the app at my own pace rather than using it quickly.</p>
            <attrib>P3</attrib>
          </disp-quote>
        </sec>
        <sec>
          <title>Reasons Why Older Adults Are Satisfied or Dissatisfied With Using the App</title>
          <p>Participants responded that they were satisfied with the app because it is visually pleasing and simple. For example, 16 (61.5%) participants reported that the interface of the app is visually satisfactory:</p>
          <disp-quote>
            <p>It was easier because the font size was okay and the freshness was simply indicated.</p>
            <attrib>P14</attrib>
          </disp-quote>
          <p>In addition, 8 (30.8%) participants reported that they had no difficulty taking pictures using the app:</p>
          <disp-quote>
            <p>The app is so easy to use because I can take a picture right away. When taking pictures of distant objects with a general camera, I need to adjust the distance and adjust the direction, but with this app, it was easy.</p>
            <attrib>P15</attrib>
          </disp-quote>
          <p>Six participants responded that the process is simple and easy to use:</p>
          <disp-quote>
            <p>This app was uncomplicated and easy to use.</p>
            <attrib>P15</attrib>
          </disp-quote>
          <p>In addition, 2 (7.7%) participants said that the app is easy to use because it does not require any authentication, 2 (7.7%) participants said they liked being able to check freshness using the app, 2 (7.7%) said that the app could provide health-related information, and 2 (7.7%) responded that they enjoyed using the app:</p>
          <disp-quote>
            <p>For example, it does not require authentication or anything like that, just taking a picture.</p>
            <attrib>P10</attrib>
          </disp-quote>
          <disp-quote>
            <p>I think that using the app is easier than tasting or smelling or anything like that. In order to check the smell or taste, I have to come in direct contact with the food now, so there is such an inconvenience.</p>
            <attrib>P14</attrib>
          </disp-quote>
          <disp-quote>
            <p>The most important thing is to check things related to my health.</p>
            <attrib>P2</attrib>
          </disp-quote>
          <disp-quote>
            <p>With my children, I check whether the fruit is rotten or not, and there are things like that. That could be fun.</p>
            <attrib>P15</attrib>
          </disp-quote>
          <p>However, some participants reported that they had difficulty using the camera or that the app is too simplistic. For example, 7 (26.9%) participants responded that they felt troublesome while using the app:</p>
          <disp-quote>
            <p>When we look at the fruit with our own eyes, we clearly identify whether it is fresh. Other methods make me inconvenience.</p>
            <attrib>P9</attrib>
          </disp-quote>
          <p>Four participants responded that the interface is still small:</p>
          <disp-quote>
            <p>It would be better if the camera screen were bigger.</p>
            <attrib>P17</attrib>
          </disp-quote>
          <p>Furthermore, 2 (7.7%) participants said that using the camera is uncomfortable, another 2 (7.7%) said the app’s design is too simple, and 2 (7.7%) said the app’s functionality is too simple:</p>
          <disp-quote>
            <p>The app was similar to the camera, but the camera app is better because it is good to focus on an object.</p>
            <attrib>P13</attrib>
          </disp-quote>
          <disp-quote>
            <p>I think the app needs to be a little more sophisticated. The elements are so angular and seem like a little basic app.</p>
            <attrib>P16</attrib>
          </disp-quote>
          <disp-quote>
            <p>The app is too simple. I have no intention of using the app because of the few features.</p>
            <attrib>P5</attrib>
          </disp-quote>
          <p>One participant said that using the app might appear to question the freshness of the food items:</p>
          <disp-quote>
            <p>I think it would be a bit strange to take a picture while having a meal together. But if the app has become a bit more popular, I think it is okay to try it once.</p>
            <attrib>P22</attrib>
          </disp-quote>
        </sec>
        <sec>
          <title>What Older Adults Want From Using the App</title>
          <p>Participants wanted the app to inform them of fruit freshness in various ways, and they also wanted the app’s detection accuracy to be improved. For example, 10 (38.5%) participants said they wanted to learn more about using the app:</p>
          <disp-quote>
            <p>It would be nice to check the freshness of not only fruits but also the kinds of herbs and various food items.</p>
            <attrib>P15</attrib>
          </disp-quote>
          <p>Seven participants said they wanted the app to be easier to use:</p>
          <disp-quote>
            <p>If the fruit pictured here is rotten, I think the result should be displayed in red, or if it is fresh, it should be displayed in blue. It is fine now, but would it not be better if the app guided me with red, blue, and yellow colors?</p>
            <attrib>P25</attrib>
          </disp-quote>
          <p>Three participants said they wanted the app to give accurate freshness:</p>
          <disp-quote>
            <p>I think the results are inaccurate. I will use it if the accuracy is higher.</p>
            <attrib>P9</attrib>
          </disp-quote>
          <p>Two participants said they wanted to check freshness in a variety of ways:</p>
          <disp-quote>
            <p>The app only checks the freshness by the shape of the fruit. Can we add a function using a smell or something like a taste?</p>
            <attrib>P20</attrib>
          </disp-quote>
          <p>One participant said that they would like to see an additional way to earn points as they use the app:</p>
          <disp-quote>
            <p>It would be nice if there were merits, such as earning points the more I use the app.</p>
            <attrib>P2</attrib>
          </disp-quote>
          <p>In addition, participants suggested some features for the app. They wanted it to be easier to take pictures or to obtain more information about the fruit. For example, 3 (11.5%) participants said that they would like the app to automatically focus on the fruit when they take a picture of a fruit:</p>
          <disp-quote>
            <p>Rather than focusing the camera on an object, it will be convenient to know the result just by being on the screen, no matter how the camera captures the fruit.</p>
            <attrib>P5</attrib>
          </disp-quote>
          <p>Two participants said they would like to add a feature that provides more information about the fruits they have photographed:</p>
          <disp-quote>
            <p>So, if I just take a picture of the fruit, the app has to tell me all the information about the fruit.</p>
            <attrib>P5</attrib>
          </disp-quote>
        </sec>
        <sec>
          <title>Potential Users of the App</title>
          <p>Participants said that they would be able to recommend the app to their acquaintances, homemakers, and even young people. For example, 4 (15.4%) participants said that their acquaintances could use the app, while 1 (3.8%) said that they could recommend the app to homemakers:</p>
          <disp-quote>
            <p>I will try to share it with the church’s female teachers and priests of my age.</p>
            <attrib>P17</attrib>
          </disp-quote>
          <disp-quote>
            <p>I think homemakers will use it a lot.</p>
            <attrib>P23</attrib>
          </disp-quote>
          <p>Four participants said that they could recommend the app to people over the age of 50 years, including older adults:</p>
          <disp-quote>
            <p>I think people who are as old as us or older can try it.</p>
            <attrib>P12</attrib>
          </disp-quote>
          <p>In contrast, 2 (7.7%) participants said that they could recommend the app to younger people:</p>
          <disp-quote>
            <p>In my opinion, young people in their twenties and thirties have no experience checking the freshness, so that the app may be widespread.</p>
            <attrib>P21</attrib>
          </disp-quote>
          <p>Three participants said that using an app to check freshness at a fruit shop would give customers peace of mind:</p>
          <disp-quote>
            <p>Individuals, of course, need to check the freshness at home, but a fruit store asks customers to check the freshness of the fruit using an app, and if the fruit is fresh, then customers can buy with confidence.</p>
            <attrib>P20</attrib>
          </disp-quote>
          <p>One participant said that she could recommend the app to health-conscious people:</p>
          <disp-quote>
            <p>I can recommend an app to anyone sensitive or unusually picky about health and food items.</p>
            <attrib>P4</attrib>
          </disp-quote>
          <p>In contrast, 1 (3.8%) participant said that they did not intend to recommend the app to others:</p>
          <disp-quote>
            <p>I do not intend even to recommend the app to others.</p>
            <attrib>P10</attrib>
          </disp-quote>
          <p>Participants said that potential users of the app would quickly learn how to use it. For example, 7 (26.9%) participants said the app is simple, which would make it easier for potential users to use it:</p>
          <disp-quote>
            <p>Rather than saying that this is easy to learn, is it not that everyone can do it right away? Because the app is very simple and easy. I think this can be done whenever a person feels it is necessary without the need to learn anything.</p>
            <attrib>P20</attrib>
          </disp-quote>
          <p>Three participants said that potential users would be able to use the app to purchase fresh food items:</p>
          <disp-quote>
            <p>Since homemakers buy the most ingredients at the mart, I can recommend it to housewives.</p>
            <attrib>P23</attrib>
          </disp-quote>
          <p>Two participants said that potential users would be able to use the app to tell whether a food item is visually fresh:</p>
          <disp-quote>
            <p>When they say they are unsure if the fruit is fresh by looking at it, would it not be possible to check it through the app?</p>
            <attrib>P19</attrib>
          </disp-quote>
          <p>One participant said that if there is a good app, it should be shared with potential users, while another said that a potential user would be interested in trying the app out of curiosity:</p>
          <disp-quote>
            <p>The good thing is that we all have to share.</p>
            <attrib>P16</attrib>
          </disp-quote>
          <disp-quote>
            <p>I will probably try it at a mart or a fruit store mainly out of curiosity.</p>
            <attrib>P21</attrib>
          </disp-quote>
        </sec>
        <sec>
          <title>Why Older Adults Trust or Distrust the App</title>
          <p>Participants were less skeptical about the app and tended to believe it, and only a small fraction of participants said they would compare it with their own thoughts. For example, 9 (34.6%) participants said that they could trust the app because it is just an app to tell them about freshness:</p>
          <disp-quote>
            <p>I did not think about why, and if I do not trust my cell phone, what do I believe? I just believed it.</p>
            <attrib>P7</attrib>
          </disp-quote>
          <p>In addition, 2 (7.7%) participants said that they could trust the app if they used it frequently, while another 2 (7.7%) said that the results were the same as they thought:</p>
          <disp-quote>
            <p>If I use the app once or twice, and the results are correct, then I should trust the app.</p>
            <attrib>P15</attrib>
          </disp-quote>
          <disp-quote>
            <p>When I look at the fruit, if it looks like the fruit is rotten and matches the result, then I can trust the app.</p>
            <attrib>P16</attrib>
          </disp-quote>
          <p>However, 7 (26.9%) participants said they trusted their senses more than the app:</p>
          <disp-quote>
            <p>Would it not be more accurate to check with my own eyes rather than the app?</p>
            <attrib>P19</attrib>
          </disp-quote>
          <p>Three participants responded that their thoughts on the freshness of the fruits and the results were different:</p>
          <disp-quote>
            <p>In my opinion, the fruit is not fresh, but the app says the fruit is fresh.</p>
            <attrib>P9</attrib>
          </disp-quote>
          <p>Six participants said that they trusted their senses more because they had not used the app much:</p>
          <disp-quote>
            <p>If the results are certain when I use the app, I will believe it from now on, but this is the first time I have used it.</p>
            <attrib>P15</attrib>
          </disp-quote>
        </sec>
        <sec>
          <title>Why Older Adults Would Use or Not Use the App If They Need to Pay</title>
          <p>Most of the participants said they would not use paid apps and were reluctant to pay. For example, 8 (30.8%) participants said that they did not feel the need to use a paid app:</p>
          <disp-quote>
            <p>I do not think I need to use an app that requires money.</p>
            <attrib>P20</attrib>
          </disp-quote>
          <p>Four participants responded that they found it difficult to use the app:</p>
          <disp-quote>
            <p>It is inconvenient to use such an app; I do not know whether it is paid or free, and I do not need it, too.</p>
            <attrib>P18</attrib>
          </disp-quote>
          <p>Furthermore, 3 (11.5%) participants said they prefer free apps, while 2 (7.7%) said that using paid apps requires money:</p>
          <disp-quote>
            <p>So, first of all, it is better to be free.</p>
            <attrib>P10</attrib>
          </disp-quote>
          <disp-quote>
            <p>Older adults prefer to save money rather than not believe the app.</p>
            <attrib>P7</attrib>
          </disp-quote>
          <p>In contrast, 8 (30.8%) participants responded that if they needed the app, they would pay to use it, depending on the situation:</p>
          <disp-quote>
            <p>If I thought I really needed the app, I would use it often.</p>
            <attrib>P22</attrib>
          </disp-quote>
        </sec>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>The results of this study showed the behaviors and perceptions of older adult participants when using our app. The survey responses revealed their positive perceptions about the app’s ease of use and their satisfaction with the time taken to complete tasks. However, the survey also showed a low willingness to pay for the service among older adults. Interviews provided further insights, revealing participants’ appreciation for the app’s usability, rapid result generation, and straightforward interface. Conversely, some participants expressed a desire for more features, while others raised concerns regarding trust in the app. In the following sections, we discuss findings based on the results, the limitations of our study, and future work.</p>
        <sec>
          <title>Factors Affecting Satisfaction When Using the App</title>
          <p>Some participants mentioned that they were satisfied, even though it took a long time to review the freshness of rotten fruits using a smartphone app. In general, it is thought that the shorter the time it takes for users, the more satisfied they will be; however, 1 participant said that the longer it takes, the more satisfied they feel. That is, when older adults use mobile health apps for health management purposes, the factors that have the greatest influence on them differ with time. According to a study that examined the dissatisfaction of older adults who watch YouTube using smartphones, it is the data plan that older adults feel most dissatisfied with [<xref ref-type="bibr" rid="ref41">41</xref>]. This finding is similar to our results. When older adults use mobile health apps, the most unsatisfactory thing for them may be that they need to pay costs or equivalents. We found that some older adults enjoy the process of using apps rather than reaching their own goal. According to a study, the reasons why older people use YouTube are to gain knowledge about political and social issues, travel information, food recipes, and health information [<xref ref-type="bibr" rid="ref41">41</xref>]. In other words, it can be seen that when older adults use a specific app, they can also enjoy the process of achieving that purpose. Harris et al [<xref ref-type="bibr" rid="ref42">42</xref>] suggested that perceived usefulness, ease of use, and enjoyment are significant motivators for older adults to adopt new technologies. Consequently, the enjoyment derived from using smartphone apps may potentially mitigate the initial hurdles faced by this group in technology adoption. Additionally, clear instruction materials and reduced costs have been identified as further facilitators for older adult engagement with deep learning and smartphone-based technologies [<xref ref-type="bibr" rid="ref42">42</xref>].</p>
        </sec>
        <sec>
          <title>Potential Users of the App</title>
          <p>Some participants responded that an app for classifying fruit freshness would be necessary for young people who lack experience in distinguishing rotten fruits by sense, rather than for older adults. These young people may consume rotten food items due to their lack of experience in checking freshness, even if their cognitive function is normal. According to a study on the kinds of bacteria that cause food poisoning and are found on the hands of elementary school students, coliforms were detected the most [<xref ref-type="bibr" rid="ref43">43</xref>]. Based on this, it seems that the age groups most likely to experience health problems due to food poisoning are students rather than older adults, so they may be potential users of the app.</p>
        </sec>
        <sec>
          <title>Factors Affecting Participants’ Confidence in the App</title>
          <p>Some participants responded that they trusted the app results because it was an app running on a smartphone, contrary to our belief that older adult users would need something to convince them to trust the app. We discovered that some people might not need a reason to believe and use something. A study examining the effect of trust in technology on the adoption of technology before revealing why older adults trust and use an app without any rationale showed that an app’s background factors greatly influence the adoption of technology [<xref ref-type="bibr" rid="ref44">44</xref>]. Likewise, we assumed that some older adults who used the app without evidence would not just trust the app but also another technology that they think will work behind the smartphone’s system.</p>
        </sec>
      </sec>
      <sec>
        <title>Limitations and Future Work</title>
        <p>We developed an app that detects rotten fruit for the health management of older adults and investigated their experiences from various aspects. However, future work is still needed to gain deeper insight into the topics discussed. First, we developed a smartphone app that determines whether a fruit is fresh when older adults take a picture of it, but the app does not include a function to distinguish the freshness of food items other than the 3 fruits (apples, bananas, and oranges). According to the results, 8 participants wanted to know the freshness of food items other than the 3 fruits. It might be important to determine the freshness of a wider variety of food items, such as vegetables, herbs, and meat, to contribute to preventing older adults from experiencing health problems due to eating rotten food items. However, to determine the freshness of food items other than the 3 fruits, a large number of images are required, which requires a lot of time and resources. According to a previous study that proposed a visual-based method for detecting rotten food items, rotten food items have common visual characteristics [<xref ref-type="bibr" rid="ref45">45</xref>], using which might enable the determination of the freshness of more diverse food items without collecting corresponding food image data.</p>
        <p>Second, we used 7 ResNets as backbone networks to create a model that can distinguish the freshness of 3 fruits. We selected ResNet-101 with the highest classification performance among the 7 networks. However, we did not use several other backbone networks. Using more diverse backbone networks, including the recently proposed backbone network, might improve the performance of the model. Recently proposed models in a classification problem using ImageNet showed more than 10% higher accuracy than the backbone network used in our study [<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref47">47</xref>]. The performance of classifying rotten food images might be greatly improved using the recently proposed network.</p>
        <p>Third, we analyzed the collected interview scripts using open coding, but 2 or more researchers did not participate in this step. Participation of more than 2 open coders is required to enrich the quality of the results and reduce the bias that may occur when a single coder is performing the analysis.</p>
        <p>Regarding future work, first, to help older adults use the app for determining the freshness of various food items and not eat rotten food, researchers need to add a function that distinguishes the freshness of food items other than the 3 fruits. Second, to enable older adults using the app to more accurately check the freshness of food items, researchers may adopt recently developed networks that show high performance in image classification as backbone networks. Researchers may use a pretrained image processing transformer [<xref ref-type="bibr" rid="ref48">48</xref>] by fine-tuning it with the image data we collected. We may also extend the possible food categories by using generalized zero-shot learning [<xref ref-type="bibr" rid="ref49">49</xref>]. Third, for app improvement, data collected from a questionnaire and interviews should be analyzed by 2 or more coders to guarantee the reliability of the findings of this study.</p>
      </sec>
      <sec>
        <title>Conclusion</title>
        <p>The goal of this study was to help older adults avoid eating any rotten food items and not suffer from health problems. We established research questions and goals based on the limitations of prior studies where researchers created and evaluated tools for detecting rotten food items. To achieve the goals, we found the highest-performing classification model among various backbone networks by training a model to determine whether the targeted fruit was rotten. The trained model was used for developing a smartphone app. The findings of this study with older adult participants revealed that the usability of the app and the older adults’ perceptions about the app are positive, but they tend to feel reluctant to use the app if they need to pay for it. We also found how to design and evaluate an app using AI models targeting older adults by uncovering their perceptions about the app. Our study contributes to the research community by revealing which of the 7 pretrained backbone networks shows the highest performance on the ImageNet classification problem for determining whether the targeted fruit is rotten. We hope our proposed app enables older adults to identify rotten food items efficiently for maintaining their health.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">API</term>
          <def>
            <p>application programming interface</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">AUC</term>
          <def>
            <p>area under the curve</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">CNN</term>
          <def>
            <p>convolutional neural network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">FTIR</term>
          <def>
            <p>Fourier transform infrared spectroscopy</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">PRISMA</term>
          <def>
            <p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">ResNet</term>
          <def>
            <p>residual network</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">SVM</term>
          <def>
            <p>support vector machine</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <notes>
      <sec>
        <title>Data Availability</title>
        <p>The data sets generated and analyzed during this study are available from the corresponding author upon reasonable request.</p>
      </sec>
    </notes>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Manesse</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ferdenzi</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Mantel</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sabri</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Bessy</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Fournel</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Faure</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Bellil</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Landis</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Hugentobler</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Giboreau</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Rouby</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Bensafi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>The prevalence of olfactory deficits and their effects on eating behavior from childhood to old age: a large-scale study in the French population</article-title>
          <source>Food Qual Prefer</source>
          <year>2021</year>
          <month>10</month>
          <volume>93</volume>
          <fpage>104273</fpage>
          <pub-id pub-id-type="doi">10.1016/j.foodqual.2021.104273</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Croy</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Nordin</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hummel</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Olfactory disorders and quality of life--an updated review</article-title>
          <source>Chem Senses</source>
          <year>2014</year>
          <month>03</month>
          <day>15</day>
          <volume>39</volume>
          <issue>3</issue>
          <fpage>185</fpage>
          <lpage>194</lpage>
          <pub-id pub-id-type="doi">10.1093/chemse/bjt072</pub-id>
          <pub-id pub-id-type="medline">24429163</pub-id>
          <pub-id pub-id-type="pii">bjt072</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Glisky</surname>
              <given-names>EL</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Riddle</surname>
              <given-names>DR</given-names>
            </name>
          </person-group>
          <article-title>Changes in cognitive function in human aging</article-title>
          <source>Brain Aging: Models, Methods, and Mechanisms</source>
          <year>2007</year>
          <publisher-loc>Boca Raton, FL</publisher-loc>
          <publisher-name>CRC Press/Taylor &#38; Francis</publisher-name>
          <fpage>3</fpage>
          <lpage>20</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dulay</surname>
              <given-names>MF</given-names>
            </name>
            <name name-style="western">
              <surname>Murphy</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Olfactory acuity and cognitive function converge in older adulthood: support for the common cause hypothesis</article-title>
          <source>Psychol Aging</source>
          <year>2002</year>
          <volume>17</volume>
          <issue>3</issue>
          <fpage>392</fpage>
          <lpage>404</lpage>
          <pub-id pub-id-type="doi">10.1037//0882-7974.17.3.392</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Deems</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Doty</surname>
              <given-names>RL</given-names>
            </name>
            <name name-style="western">
              <surname>Settle</surname>
              <given-names>RG</given-names>
            </name>
            <name name-style="western">
              <surname>Moore-Gillon</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Shaman</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Mester</surname>
              <given-names>AF</given-names>
            </name>
            <name name-style="western">
              <surname>Kimmelman</surname>
              <given-names>CP</given-names>
            </name>
            <name name-style="western">
              <surname>Brightman</surname>
              <given-names>VJ</given-names>
            </name>
            <name name-style="western">
              <surname>Snow</surname>
              <given-names>JB</given-names>
            </name>
          </person-group>
          <article-title>Smell and taste disorders, a study of 750 patients from the University of Pennsylvania Smell and Taste Center</article-title>
          <source>Arch Otolaryngol Head Neck Surg</source>
          <year>1991</year>
          <month>05</month>
          <day>01</day>
          <volume>117</volume>
          <issue>5</issue>
          <fpage>519</fpage>
          <lpage>528</lpage>
          <pub-id pub-id-type="doi">10.1001/archotol.1991.01870170065015</pub-id>
          <pub-id pub-id-type="medline">2021470</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hoffman</surname>
              <given-names>HJ</given-names>
            </name>
            <name name-style="western">
              <surname>Rawal</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Duffy</surname>
              <given-names>VB</given-names>
            </name>
          </person-group>
          <article-title>New chemosensory component in the U.S. National Health and Nutrition Examination Survey (NHANES): first-year results for measured olfactory dysfunction</article-title>
          <source>Rev Endocr Metab Disord</source>
          <year>2016</year>
          <month>06</month>
          <volume>17</volume>
          <issue>2</issue>
          <fpage>221</fpage>
          <lpage>240</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/27287364"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s11154-016-9364-1</pub-id>
          <pub-id pub-id-type="medline">27287364</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11154-016-9364-1</pub-id>
          <pub-id pub-id-type="pmcid">PMC5033684</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Verma</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Singh</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Tiwari</surname>
              <given-names>RK</given-names>
            </name>
            <name name-style="western">
              <surname>Srivastava</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Verma</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <!-- NOTE(review): this journal citation is missing its <article-title>; every other citation-type="journal" entry has one. Verify against the original reference list and add the title. -->
          <source>Asian J Plant Sci Res</source>
          <year>2012</year>
          <volume>2</volume>
          <issue>4</issue>
          <fpage>503</fpage>
          <lpage>509</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Karakaya</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ulucan</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Turkan</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>A comparative analysis on fruit freshness classification</article-title>
          <year>2019</year>
          <conf-name>ASYU 2019: IEEE Innovations in Intelligent Systems and Applications Conference</conf-name>
          <conf-date>October 31-November 2, 2019</conf-date>
          <conf-loc>Izmir, Turkey</conf-loc>
          <fpage>1</fpage>
          <lpage>4</lpage>
          <pub-id pub-id-type="doi">10.1109/asyu48272.2019.8946385</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jagtap</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bhatt</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Thik</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Rahimifard</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Monitoring potato waste in food manufacturing using image processing and internet of things approach</article-title>
          <source>Sustainability</source>
          <year>2019</year>
          <month>06</month>
          <day>05</day>
          <volume>11</volume>
          <issue>11</issue>
          <fpage>3173</fpage>
          <pub-id pub-id-type="doi">10.3390/su11113173</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Abdelkhalek</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Alfayad</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Benouezdou</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Fayek</surname>
              <given-names>MB</given-names>
            </name>
            <name name-style="western">
              <surname>Chassagne</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Compact and embedded electronic nose for volatile and non-volatile odor classification for robot applications</article-title>
          <source>IEEE Access</source>
          <year>2019</year>
          <volume>7</volume>
          <fpage>98267</fpage>
          <lpage>98276</lpage>
          <pub-id pub-id-type="doi">10.1109/access.2019.2928875</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Choi</surname>
              <given-names>MF</given-names>
            </name>
            <name name-style="western">
              <surname>Hawkins</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>The development of optical chemical sensors for the detection of volatile compounds from spoiled hams</article-title>
          <source>Sens Actuators B Chem</source>
          <year>1997</year>
          <month>03</month>
          <volume>39</volume>
          <issue>1-3</issue>
          <fpage>390</fpage>
          <lpage>394</lpage>
          <pub-id pub-id-type="doi">10.1016/s0925-4005(97)80240-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Caya</surname>
              <given-names>MVC</given-names>
            </name>
            <name name-style="western">
              <surname>Cruz</surname>
              <given-names>FRG</given-names>
            </name>
            <name name-style="western">
              <surname>Fernando</surname>
              <given-names>CMN</given-names>
            </name>
            <name name-style="western">
              <surname>Lafuente</surname>
              <given-names>RMM</given-names>
            </name>
            <name name-style="western">
              <surname>Malonzo</surname>
              <given-names>MB</given-names>
            </name>
            <name name-style="western">
              <surname>Chung</surname>
              <given-names>WY</given-names>
            </name>
          </person-group>
          <article-title>Monitoring and detection of fruits and vegetables spoilage in the refrigerator using electronic nose based on principal component analysis</article-title>
          <year>2019</year>
          <conf-name>HNICEM 2019: IEEE 11th International Conference on Humanoid, Nanotechnology, Information Technology, Communication and Control, Environment, and Management</conf-name>
          <conf-date>2019</conf-date>
          <conf-loc>Laoag, Philippines</conf-loc>
          <pub-id pub-id-type="doi">10.1109/hnicem48295.2019.9072715</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tian</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Cai</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Rapid classification of hairtail fish and pork freshness using an electronic nose based on the PCA method</article-title>
          <source>Sensors (Basel)</source>
          <year>2012</year>
          <month>12</month>
          <day>28</day>
          <volume>12</volume>
          <issue>1</issue>
          <fpage>260</fpage>
          <lpage>277</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s120100260"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s120100260</pub-id>
          <pub-id pub-id-type="medline">22368468</pub-id>
          <pub-id pub-id-type="pii">s120100260</pub-id>
          <pub-id pub-id-type="pmcid">PMC3279212</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mikš-Krajnik</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Yoon</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ukuku</surname>
              <given-names>DO</given-names>
            </name>
            <name name-style="western">
              <surname>Yuk</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Identification and quantification of volatile chemical spoilage indexes associated with bacterial growth dynamics in aerobically stored chicken</article-title>
          <source>J Food Sci</source>
          <year>2016</year>
          <month>08</month>
          <day>22</day>
          <volume>81</volume>
          <issue>8</issue>
          <fpage>M2006</fpage>
          <lpage>M2014</lpage>
          <pub-id pub-id-type="doi">10.1111/1750-3841.13371</pub-id>
          <pub-id pub-id-type="medline">27332555</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Janagama</surname>
              <given-names>HK</given-names>
            </name>
            <name name-style="western">
              <surname>Mai</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Han</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Nadala</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Nadala</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Samadpour</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Simultaneous detection of multiple wine-spoilage organisms using a PCR-based DNA dipstick assay</article-title>
          <source>J AOAC Int</source>
          <year>2019</year>
          <month>03</month>
          <day>01</day>
          <volume>102</volume>
          <issue>2</issue>
          <fpage>490</fpage>
          <lpage>496</lpage>
          <pub-id pub-id-type="doi">10.5740/jaoacint.18-0144</pub-id>
          <pub-id pub-id-type="medline">30126467</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Xue</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wan</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Real-time assessment of food freshness in refrigerators based on a miniaturized electronic nose</article-title>
          <source>Anal Methods</source>
          <year>2018</year>
          <month>10</month>
          <day>11</day>
          <volume>10</volume>
          <issue>39</issue>
          <fpage>4741</fpage>
          <lpage>4749</lpage>
          <pub-id pub-id-type="doi">10.1039/c8ay01242c</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Perez-Daniel</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Fierro-Radilla</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Peñaloza-Cobos</surname>
              <given-names>JP</given-names>
            </name>
          </person-group>
          <article-title>Rotten fruit detection using a one stage object detector</article-title>
          <year>2020</year>
          <month>10</month>
          <conf-name>MICAI 2020: Mexican International Conference on Artificial Intelligence</conf-name>
          <conf-date>October 12-17, 2020</conf-date>
          <conf-loc>Mexico City, Mexico</conf-loc>
          <pub-id pub-id-type="doi">10.1007/978-3-030-60887-3_29</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Roy</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Chaudhuri</surname>
              <given-names>SS</given-names>
            </name>
            <name name-style="western">
              <surname>Bhattacharjee</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Manna</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Chakraborty</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Segmentation techniques for rotten fruit detection</article-title>
          <year>2019</year>
          <conf-name>2019 IEEE International Conference on Opto-Electronics and Applied Optics (Optronix)</conf-name>
          <conf-date>March 18-20, 2019</conf-date>
          <conf-loc>Kolkata, India</conf-loc>
          <fpage>1</fpage>
          <lpage>4</lpage>
          <pub-id pub-id-type="doi">10.1109/optronix.2019.8862367</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Blasco</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Gomez-Sanchis</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Aleixos</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Cubero</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Juste</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Molto</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Detection of fungal infestation in citrus fruits using hyperspectral imaging</article-title>
          <year>2009</year>
          <conf-name>5th International Symposium on “Food Processing, Monitoring Technology in Bioprocesses and Food Quality Management”</conf-name>
          <conf-date>August 31-September 2, 2009</conf-date>
          <conf-loc>Potsdam, Germany</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ellis</surname>
              <given-names>DI</given-names>
            </name>
            <name name-style="western">
              <surname>Broadhurst</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Kell</surname>
              <given-names>DB</given-names>
            </name>
            <name name-style="western">
              <surname>Rowland</surname>
              <given-names>JJ</given-names>
            </name>
            <name name-style="western">
              <surname>Goodacre</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Rapid and quantitative detection of the microbial spoilage of meat by Fourier transform infrared spectroscopy and machine learning</article-title>
          <source>Appl Environ Microbiol</source>
          <year>2002</year>
          <month>06</month>
          <volume>68</volume>
          <issue>6</issue>
          <fpage>2822</fpage>
          <lpage>2828</lpage>
          <pub-id pub-id-type="doi">10.1128/aem.68.6.2822-2828.2002</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Batugal</surname>
              <given-names>CL</given-names>
            </name>
            <name name-style="western">
              <surname>Gupo</surname>
              <given-names>JMP</given-names>
            </name>
            <name name-style="western">
              <surname>Mendoza</surname>
              <given-names>KK</given-names>
            </name>
            <name name-style="western">
              <surname>Santos</surname>
              <given-names>AS</given-names>
            </name>
            <name name-style="western">
              <surname>Malabanan</surname>
              <given-names>FA</given-names>
            </name>
            <name name-style="western">
              <surname>Tabing</surname>
              <given-names>JNT</given-names>
            </name>
            <name name-style="western">
              <surname>Escarez</surname>
              <given-names>CB</given-names>
            </name>
          </person-group>
          <article-title>EyeSmell: rice spoilage detection using azure custom vision in Raspberry Pi 3</article-title>
          <year>2020</year>
          <conf-name>2020 IEEE Region 10 Conference (TENCON)</conf-name>
          <conf-date>November 16-19, 2020</conf-date>
          <conf-loc>Osaka, Japan</conf-loc>
          <fpage>738</fpage>
          <lpage>743</lpage>
          <pub-id pub-id-type="doi">10.1109/tencon50793.2020.9293915</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Musa</surname>
              <given-names>MNI</given-names>
            </name>
            <name name-style="western">
              <surname>Marimuthu</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Rashid</surname>
              <given-names>HNM</given-names>
            </name>
            <name name-style="western">
              <surname>Sambasevam</surname>
              <given-names>KP</given-names>
            </name>
          </person-group>
          <article-title>Development of pH indicator film composed of corn starch-glycerol and anthocyanin from Hibiscus sabdariffa</article-title>
          <year>2019</year>
          <conf-name>ICYC 2019: 7th International Conference for Young Chemists</conf-name>
          <conf-date>August 14, 2019</conf-date>
          <conf-loc>George Town, Malaysia</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Caya</surname>
              <given-names>MVC</given-names>
            </name>
            <name name-style="western">
              <surname>Cruz</surname>
              <given-names>FRG</given-names>
            </name>
            <name name-style="western">
              <surname>Blas</surname>
              <given-names>PJR</given-names>
            </name>
            <name name-style="western">
              <surname>Cagalingan</surname>
              <given-names>MM</given-names>
            </name>
            <name name-style="western">
              <surname>Malbas</surname>
              <given-names>RGL</given-names>
            </name>
            <name name-style="western">
              <surname>Chung</surname>
              <given-names>WY</given-names>
            </name>
          </person-group>
          <article-title>Determining spoilage level against time and temperature of tomato-based Filipino cuisines using electronic nose</article-title>
          <year>2017</year>
          <conf-name>HNICEM 2017: IEEE 9th International Conference on Humanoid, Nanotechnology, Information Technology, Communication and Control, Environment and Management</conf-name>
          <conf-date>December 1-3, 2017</conf-date>
          <conf-loc>Manila, Philippines</conf-loc>
          <pub-id pub-id-type="doi">10.1109/hnicem.2017.8269443</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>ul Hasan</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Ejaz</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Ejaz</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>HS</given-names>
            </name>
          </person-group>
          <article-title>Meat and fish freshness inspection system based on odor sensing</article-title>
          <source>Sensors (Basel)</source>
          <year>2012</year>
          <month>11</month>
          <day>09</day>
          <volume>12</volume>
          <issue>11</issue>
          <fpage>15542</fpage>
          <lpage>15557</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=s121115542"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/s121115542</pub-id>
          <pub-id pub-id-type="medline">23202222</pub-id>
          <pub-id pub-id-type="pii">s121115542</pub-id>
          <pub-id pub-id-type="pmcid">PMC3522975</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Green</surname>
              <given-names>GC</given-names>
            </name>
            <name name-style="western">
              <surname>Chan</surname>
              <given-names>AD</given-names>
            </name>
            <name name-style="western">
              <surname>Goubran</surname>
              <given-names>RA</given-names>
            </name>
          </person-group>
          <article-title>Identification of food spoilage in the smart home based on neural and fuzzy processing of odour sensor responses</article-title>
          <year>2009</year>
          <conf-name>2009 Annual International Conference of the IEEE Engineering in Medicine and Biology Society</conf-name>
          <conf-date>September 3-6, 2009</conf-date>
          <conf-loc>Minneapolis, MN</conf-loc>
          <pub-id pub-id-type="doi">10.1109/iembs.2009.5335374</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kodogiannis</surname>
              <given-names>VS</given-names>
            </name>
            <name name-style="western">
              <surname>Alshejari</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>An asymmetric neuro-fuzzy model for the detection of meat spoilage</article-title>
          <year>2020</year>
          <conf-name>IJCNN 2020: IEEE International Joint Conference on Neural Networks</conf-name>
          <conf-date>July 19-24, 2020</conf-date>
          <conf-loc>Glasgow, UK</conf-loc>
          <pub-id pub-id-type="doi">10.1109/ijcnn48605.2020.9207063</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Increased U.S. imports of fresh fruits and vegetables</article-title>
          <source>United States Department of Agriculture</source>
          <year>2007</year>
          <month>9</month>
          <access-date>2024-06-07</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.ers.usda.gov/webdocs/outlooks/37024/11124_fts32801_1_.pdf?v=725">https://www.ers.usda.gov/webdocs/outlooks/37024/11124_fts32801_1_.pdf?v=725</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kalluri</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Fruits fresh and rotten for classification: apples oranges bananas</article-title>
          <source>Kaggle</source>
          <access-date>2024-06-07</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.kaggle.com/datasets/sriramr/fruits-fresh-and-rotten-for-classification">https://www.kaggle.com/datasets/sriramr/fruits-fresh-and-rotten-for-classification</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lewis</surname>
              <given-names>JR</given-names>
            </name>
          </person-group>
          <article-title>Psychometric evaluation of an after-scenario questionnaire for computer usability studies</article-title>
          <source>SIGCHI Bull</source>
          <year>1991</year>
          <month>01</month>
          <volume>23</volume>
          <issue>1</issue>
          <fpage>78</fpage>
          <lpage>81</lpage>
          <pub-id pub-id-type="doi">10.1145/122672.122692</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Longhurst</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Clifford</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>French</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Valentine</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Semi-structured interviews and focus groups</article-title>
          <source>Key Methods in Geography</source>
          <year>2003</year>
          <publisher-loc>Thousand Oaks, CA</publisher-loc>
          <publisher-name>SAGE Publications</publisher-name>
          <fpage>143</fpage>
          <lpage>156</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Page</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>McKenzie</surname>
              <given-names>JE</given-names>
            </name>
            <name name-style="western">
              <surname>Bossuyt</surname>
              <given-names>PM</given-names>
            </name>
            <name name-style="western">
              <surname>Boutron</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Hoffmann</surname>
              <given-names>TC</given-names>
            </name>
            <name name-style="western">
              <surname>Mulrow</surname>
              <given-names>CD</given-names>
            </name>
            <name name-style="western">
              <surname>Shamseer</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Tetzlaff</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Moher</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Updating guidance for reporting systematic reviews: development of the PRISMA 2020 statement</article-title>
          <source>J Clin Epidemiol</source>
          <year>2021</year>
          <month>06</month>
          <volume>134</volume>
          <fpage>103</fpage>
          <lpage>112</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jclinepi.2021.02.003</pub-id>
          <pub-id pub-id-type="medline">33577987</pub-id>
          <pub-id pub-id-type="pii">S0895-4356(21)00040-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Otsu</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>A threshold selection method from gray-level histograms</article-title>
          <source>IEEE Trans Syst Man Cybern</source>
          <year>1979</year>
          <month>1</month>
          <volume>9</volume>
          <issue>1</issue>
          <fpage>62</fpage>
          <lpage>66</lpage>
          <pub-id pub-id-type="doi">10.1109/tsmc.1979.4310076</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>He</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Ren</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Deep residual learning for image recognition</article-title>
          <year>2016</year>
          <conf-name>2016 IEEE Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>June 26-July 1, 2016</conf-date>
          <conf-loc>Las Vegas, NV</conf-loc>
          <pub-id pub-id-type="doi">10.1109/cvpr.2016.90</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>DiMarzio</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <source>Beginning Android Programming with Android Studio</source>
          <year>2016</year>
          <publisher-loc>Hoboken, NJ</publisher-loc>
          <publisher-name>John Wiley &#38; Sons</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Walker</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Takayama</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Landay</surname>
              <given-names>JA</given-names>
            </name>
          </person-group>
          <article-title>High-fidelity or low-fidelity, paper or computer? Choosing attributes when testing web prototypes</article-title>
          <source>Proc Hum Factors Ergonom Soc Annu Meeting</source>
          <year>2016</year>
          <month>11</month>
          <day>06</day>
          <volume>46</volume>
          <issue>5</issue>
          <fpage>661</fpage>
          <lpage>665</lpage>
          <pub-id pub-id-type="doi">10.1177/154193120204600513</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>McKight</surname>
              <given-names>PE</given-names>
            </name>
            <name name-style="western">
              <surname>Najab</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Kruskal-Wallis test</article-title>
          <source>The Corsini Encyclopedia of Psychology</source>
          <year>2010</year>
          <publisher-loc>Hoboken, NJ</publisher-loc>
          <publisher-name>John Wiley &#38; Sons</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Warijan</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Wahyudi</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Astuti</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Rahayu</surname>
              <given-names>RD</given-names>
            </name>
          </person-group>
          <article-title>Nursing care of hypertension in the elderly with a focus on study of activity intolerance</article-title>
          <source>J Studi Keperawatan</source>
          <year>2021</year>
          <month>03</month>
          <day>31</day>
          <volume>2</volume>
          <issue>1</issue>
          <fpage>14</fpage>
          <lpage>23</lpage>
          <pub-id pub-id-type="doi">10.31983/j-sikep.v2i1.6805</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Goodman</surname>
              <given-names>LA</given-names>
            </name>
          </person-group>
          <article-title>Snowball sampling</article-title>
          <source>Ann Math Stat</source>
          <year>1961</year>
          <month>03</month>
          <volume>32</volume>
          <issue>1</issue>
          <fpage>148</fpage>
          <lpage>170</lpage>
          <pub-id pub-id-type="doi">10.1214/aoms/1177705148</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Corbin</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Strauss</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <source>Basics of Qualitative Research: Techniques and Procedures for Developing Grounded Theory</source>
          <year>2014</year>
          <publisher-loc>Thousand Oaks, CA</publisher-loc>
          <publisher-name>SAGE Publications</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Patton</surname>
              <given-names>MQ</given-names>
            </name>
          </person-group>
          <source>Qualitative Research &#38; Evaluation Methods: Integrating Theory and Practice</source>
          <year>2014</year>
          <publisher-loc>Thousand Oaks, CA</publisher-loc>
          <publisher-name>SAGE Publications</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Gwak</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gwon</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Eom</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hong</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ku</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Jung</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Exploring the community of older adult viewers on YouTube</article-title>
          <source>Univ Access Inf Soc</source>
          <year>2022</year>
          <month>09</month>
          <day>22</day>
          <volume>22</volume>
          <issue>4</issue>
          <fpage>1393</fpage>
          <lpage>1404</lpage>
          <pub-id pub-id-type="doi">10.1007/s10209-022-00918-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Harris</surname>
              <given-names>MT</given-names>
            </name>
            <name name-style="western">
              <surname>Blocker</surname>
              <given-names>KA</given-names>
            </name>
            <name name-style="western">
              <surname>Rogers</surname>
              <given-names>WA</given-names>
            </name>
          </person-group>
          <article-title>Older adults and smart technology: facilitators and barriers to use</article-title>
          <source>Front Comput Sci</source>
          <year>2022</year>
          <month>5</month>
          <day>4</day>
          <volume>4</volume>
          <fpage>835927</fpage>
          <pub-id pub-id-type="doi">10.3389/fcomp.2022.835927</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chung</surname>
              <given-names>JK</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Kee</surname>
              <given-names>HY</given-names>
            </name>
            <name name-style="western">
              <surname>Choi</surname>
              <given-names>MH</given-names>
            </name>
            <name name-style="western">
              <surname>Seo</surname>
              <given-names>JJ</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>SH</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>JT</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>MG</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>ES</given-names>
            </name>
          </person-group>
          <article-title>Prevalence of food poisoning bacteria on hands in various age groups</article-title>
          <source>J Food Hyg Saf</source>
          <year>2008</year>
          <volume>23</volume>
          <issue>1</issue>
          <fpage>40</fpage>
          <lpage>50</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Klaver</surname>
              <given-names>NS</given-names>
            </name>
            <name name-style="western">
              <surname>van de Klundert</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>van den Broek</surname>
              <given-names>RJGM</given-names>
            </name>
            <name name-style="western">
              <surname>Askari</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Relationship between perceived risks of using mHealth applications and the intention to use them among older adults in the Netherlands: cross-sectional study</article-title>
          <source>JMIR Mhealth Uhealth</source>
          <year>2021</year>
          <month>08</month>
          <day>30</day>
          <volume>9</volume>
          <issue>8</issue>
          <fpage>e26845</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mhealth.jmir.org/2021/8/e26845/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/26845</pub-id>
          <pub-id pub-id-type="medline">34459745</pub-id>
          <pub-id pub-id-type="pii">v9i8e26845</pub-id>
          <pub-id pub-id-type="pmcid">PMC8438611</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Patel</surname>
              <given-names>KK</given-names>
            </name>
            <name name-style="western">
              <surname>Kar</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Jha</surname>
              <given-names>SN</given-names>
            </name>
            <name name-style="western">
              <surname>Khan</surname>
              <given-names>MA</given-names>
            </name>
          </person-group>
          <article-title>Machine vision system: a tool for quality inspection of food and agricultural products</article-title>
          <source>J Food Sci Technol</source>
          <year>2012</year>
          <month>04</month>
          <volume>49</volume>
          <issue>2</issue>
          <fpage>123</fpage>
          <lpage>141</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/23572836"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s13197-011-0321-4</pub-id>
          <pub-id pub-id-type="medline">23572836</pub-id>
          <pub-id pub-id-type="pii">321</pub-id>
          <pub-id pub-id-type="pmcid">PMC3550871</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Vasudevan</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Yeung</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Seyedhosseini</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>CoCa: contrastive captioners are image-text foundation models</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online May 4, 2022</comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2205.01917</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wortsman</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ilharco</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Gadre</surname>
              <given-names>SY</given-names>
            </name>
            <name name-style="western">
              <surname>Roelofs</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Gontijo-Lopes</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Morcos</surname>
              <given-names>AS</given-names>
            </name>
            <name name-style="western">
              <surname>Schmidt</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Model soups: averaging weights of multiple fine-tuned models improves accuracy without increasing inference time</article-title>
          <year>2022</year>
          <conf-name>ICML 2022: Thirty-ninth International Conference on Machine Learning</conf-name>
          <conf-date>July 17-23, 2022</conf-date>
          <conf-loc>Baltimore, MD</conf-loc>
          <fpage>23965</fpage>
          <lpage>23998</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Guo</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Xu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Deng</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Liu</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>W</given-names>
            </name>
          </person-group>
          <article-title>Pre-trained image processing transformer</article-title>
          <year>2021</year>
          <conf-name>CVPR 2021: IEEE/CVF Conference on Computer Vision and Pattern Recognition</conf-name>
          <conf-date>June 20-25, 2021</conf-date>
          <conf-loc>Nashville, TN</conf-loc>
          <pub-id pub-id-type="doi">10.1109/cvpr46437.2021.01212</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chao</surname>
              <given-names>WL</given-names>
            </name>
            <name name-style="western">
              <surname>Changpinyo</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gong</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Sha</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>An empirical study and analysis of generalized zero-shot learning for object recognition in the wild</article-title>
          <year>2016</year>
          <conf-name>Computer Vision–ECCV 2016: 14th European Conference</conf-name>
          <conf-date>October 11-14, 2016</conf-date>
          <conf-loc>Amsterdam, the Netherlands</conf-loc>
          <fpage>52</fpage>
          <lpage>68</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-319-46475-6_4</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
