<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JFR</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Form Res</journal-id>
      <journal-title>JMIR Formative Research</journal-title>
      <issn pub-type="epub">2561-326X</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v9i1e53928</article-id>
      <article-id pub-id-type="pmid">39842001</article-id>
      <article-id pub-id-type="doi">10.2196/53928</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Discrimination of Radiologists' Experience Level Using Eye-Tracking Technology and Machine Learning: Case Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Mavragani</surname>
            <given-names>Amaryllis</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Silva</surname>
            <given-names>Nelson</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Hirota</surname>
            <given-names>Masakazu</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author">
          <name name-style="western">
            <surname>Martinez</surname>
            <given-names>Stanford</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-7735-6572</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Ramirez-Tamayo</surname>
            <given-names>Carolina</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-4413-1366</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Akhter Faruqui</surname>
            <given-names>Syed Hasib</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-5073-8690</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Clark</surname>
            <given-names>Kal</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2511-7577</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Alaeddini</surname>
            <given-names>Adel</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff4" ref-type="aff">4</xref>
          <address>
            <institution>Department of Mechanical Engineering</institution>
            <institution>Southern Methodist University</institution>
            <addr-line>3101 Dyer Street</addr-line>
            <addr-line>Dallas, TX, 75205</addr-line>
            <country>United States</country>
            <phone>1 214 768 3050</phone>
            <email>aalaeddini@smu.edu</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-4451-3150</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Czarnek</surname>
            <given-names>Nicholas</given-names>
          </name>
          <degrees>PhD</degrees>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-7889-0184</ext-link>
        </contrib>
        <contrib id="contrib7" contrib-type="author">
          <name name-style="western">
            <surname>Aggarwal</surname>
            <given-names>Aarushi</given-names>
          </name>
          <degrees>MD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-6557-680X</ext-link>
        </contrib>
        <contrib id="contrib8" contrib-type="author">
          <name name-style="western">
            <surname>Emamzadeh</surname>
            <given-names>Sahra</given-names>
          </name>
          <degrees>MD, MPH</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-1053-1441</ext-link>
        </contrib>
        <contrib id="contrib9" contrib-type="author">
          <name name-style="western">
            <surname>Mock</surname>
            <given-names>Jeffrey R</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff5" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-0446-6687</ext-link>
        </contrib>
        <contrib id="contrib10" contrib-type="author">
          <name name-style="western">
            <surname>Golob</surname>
            <given-names>Edward J</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff5" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-1560-9076</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Mechanical Engineering</institution>
        <institution>The University of Texas at San Antonio</institution>
        <addr-line>San Antonio, TX</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Department of Engineering Technology</institution>
        <institution>Sam Houston State University</institution>
        <addr-line>Huntsville, TX</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Department of Radiology</institution>
        <institution>University of Texas Health Science Center at San Antonio</institution>
        <addr-line>San Antonio, TX</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Department of Mechanical Engineering</institution>
        <institution>Southern Methodist University</institution>
        <addr-line>Dallas, TX</addr-line>
        <country>United States</country>
      </aff>
      <aff id="aff5">
        <label>5</label>
        <institution>Department of Psychology</institution>
        <institution>The University of Texas at San Antonio</institution>
        <addr-line>San Antonio, TX</addr-line>
        <country>United States</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Adel Alaeddini <email>aalaeddini@smu.edu</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2025</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>22</day>
        <month>1</month>
        <year>2025</year>
      </pub-date>
      <volume>9</volume>
      <elocation-id>e53928</elocation-id>
      <history>
        <date date-type="received">
          <day>24</day>
          <month>10</month>
          <year>2023</year>
        </date>
        <date date-type="rev-request">
          <day>5</day>
          <month>12</month>
          <year>2023</year>
        </date>
        <date date-type="rev-recd">
          <day>31</day>
          <month>5</month>
          <year>2024</year>
        </date>
        <date date-type="accepted">
          <day>22</day>
          <month>6</month>
          <year>2024</year>
        </date>
      </history>
      <copyright-statement>©Stanford Martinez, Carolina Ramirez-Tamayo, Syed Hasib Akhter Faruqui, Kal Clark, Adel Alaeddini, Nicholas Czarnek, Aarushi Aggarwal, Sahra Emamzadeh, Jeffrey R Mock, Edward J Golob. Originally published in JMIR Formative Research (https://formative.jmir.org), 22.01.2025.</copyright-statement>
      <copyright-year>2025</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Formative Research, is properly cited. The complete bibliographic information, a link to the original publication on https://formative.jmir.org, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://formative.jmir.org/2025/1/e53928" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Perception-related errors comprise most diagnostic mistakes in radiology. To mitigate this problem, radiologists use personalized and high-dimensional visual search strategies, otherwise known as search patterns. Qualitative descriptions of these search patterns, which involve the physician verbalizing or annotating the order in which he or she analyzes the image, can be unreliable due to discrepancies in what is reported versus the actual visual patterns. This discrepancy can interfere with quality improvement interventions and negatively impact patient care.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>The objective of this study is to provide an alternative method for distinguishing between radiologists by means of captured eye-tracking data such that the raw gaze (or processed fixation data) can be used to discriminate users based on subconscious behavior in visual inspection.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>We present a novel discretized feature encoding based on spatiotemporal binning of fixation data for efficient geometric alignment and temporal ordering of eye movement when reading chest x-rays. The encoded features of the eye-fixation data are used by machine learning classifiers to discriminate between faculty and trainee radiologists. A clinical trial case study was conducted using metrics such as the area under the curve, accuracy, <italic>F</italic><sub>1</sub>-score, sensitivity, and specificity to evaluate the discriminability between the 2 groups regarding their level of experience. The classification performance was then compared with state-of-the-art methodologies. In addition, a repeatability experiment using a separate dataset, experimental protocol, and eye tracker was performed with 8 participants to evaluate the robustness of the proposed approach.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The numerical results from both experiments demonstrate that classifiers using the proposed feature encoding methods outperform the current state-of-the-art in differentiating between radiologists in terms of experience level. An average performance gain of 6.9% is observed compared with traditional features while classifying experience levels of radiologists. This gain in accuracy is also substantial across different eye tracker–collected datasets, with improvements of 6.41% using the Tobii eye tracker and 7.29% using the EyeLink eye tracker. These results signify the potential impact of the proposed method for identifying radiologists’ level of expertise and those who would benefit from additional training.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>The effectiveness of the proposed spatiotemporal discretization approach, validated across diverse datasets and various classification metrics, underscores its potential for objective evaluation, informing targeted interventions and training strategies in radiology. This research advances reliable assessment tools, addressing challenges in perception-related errors to enhance patient care outcomes.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>machine learning</kwd>
        <kwd>eye-tracking</kwd>
        <kwd>experience level determination</kwd>
        <kwd>radiology education</kwd>
        <kwd>search pattern feature extraction</kwd>
        <kwd>search pattern</kwd>
        <kwd>radiology</kwd>
        <kwd>classification</kwd>
        <kwd>gaze</kwd>
        <kwd>fixation</kwd>
        <kwd>education</kwd>
        <kwd>experience</kwd>
        <kwd>spatio-temporal</kwd>
        <kwd>image</kwd>
        <kwd>x-ray</kwd>
        <kwd>eye movement</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Lung cancer is the leading cause of cancer death, claiming 139,000 American lives yearly [<xref ref-type="bibr" rid="ref1">1</xref>]. To mitigate its impact, the US Preventive Services Task Force recommends annual radiological screening for at-risk individuals [<xref ref-type="bibr" rid="ref2">2</xref>]. Radiologists identify suspicious lung lesions (nodules) from patient chest images and recommend further management, including biopsy, continued surveillance, or further workup. Radiological surveillance reduces population mortality from lung cancer, but it is estimated that radiologists will make errors on 33% of abnormal chest exams, eliminating the chance for patients to start lifesaving treatment [<xref ref-type="bibr" rid="ref3">3</xref>]. The predominant source of these errors is not deficient medical knowledge. Rather, errors primarily stem from the methods radiologists use to visually inspect the image, referred to as perceptual errors [<xref ref-type="bibr" rid="ref4">4</xref>]. In other words, perceptual errors in radiology are mistakes that occur during the visual inspection and interpretation of medical images. They are distinct from cognitive errors, which involve incorrect reasoning or decision-making based on observed information. There are 2 primary patterns for overlooking a disease due to perceptual errors:</p>
      <list list-type="bullet">
        <list-item>
          <p>Examining the affected area but ignoring the disease: This occurs when the radiologist inspects the region with the abnormality but fails to recognize it, possibly due to subtle presentation, distractions, or visual fatigue.</p>
        </list-item>
        <list-item>
          <p>Not examining the affected area: This happens when the radiologist misses the region with the abnormality entirely, often due to inefficient search patterns, incomplete scanning, or being misled by more prominent findings elsewhere.</p>
        </list-item>
      </list>
      <p>Kundel [<xref ref-type="bibr" rid="ref5">5</xref>] investigated the effects of perceptual errors in radiology and concluded that decisions and outcomes improve when radiologists’ experiences are enhanced.</p>
      <p>Radiologists and radiology educators understand the stakes associated with missed diagnoses due to perceptual errors but have limited tools to combat these errors. Classical educational texts include general concepts, for example, “...scan the areas of least interest first, working toward the more important areas” [<xref ref-type="bibr" rid="ref6">6</xref>], which, unfortunately, are inadequate to improve radiologist performance meaningfully.</p>
      <p>Eye-tracking technology has been previously proposed as a tool to evaluate radiologist perception. Eye trackers are powerful because they provide high (&#62;30 Hz) temporal and spatial resolution (approximately 1 degree of error). With the aid of eye tracking, quantitative analyses can be performed to understand the cognitive and perceptual processes better. Eye-tracking technology has previously proven relevant in evaluating decision-making processes [<xref ref-type="bibr" rid="ref7">7</xref>], attention interruption [<xref ref-type="bibr" rid="ref8">8</xref>], skill level determination [<xref ref-type="bibr" rid="ref9">9</xref>], and impact of search pattern education [<xref ref-type="bibr" rid="ref10">10</xref>].</p>
      <p>In 2017, van der Gijp et al [<xref ref-type="bibr" rid="ref11">11</xref>] performed a systematic literature review outlining the current state of science concerning visual perception in radiology. A key tenet is the global-focal search model [<xref ref-type="bibr" rid="ref12">12</xref>-<xref ref-type="bibr" rid="ref14">14</xref>], which can be summarized as the generation of an initial, fast global impression followed by a more detailed focal search. Eye-tracking technology allows these principles to be tested and potentially optimized to evaluate all clinically relevant portions of the exam in greater detail. Of the 22 relevant articles van der Gijp et al [<xref ref-type="bibr" rid="ref11">11</xref>] reviewed, a consensus “traditional” feature set consisting of 5 features that could be experimentally measured was found to be associated with expertise.</p>
      <p>Despite the development of this consensus feature set, visual search complexity may not be adequately captured by simple, low-dimensional features that do not fully describe how visual perception relates to skill. Machine learning is well-suited to provide deeper insight into radiologist visual search behavior and how this relates to radiologist performance. Waite et al [<xref ref-type="bibr" rid="ref3">3</xref>] highlighted the importance of understanding perceptual expertise in radiology and the potential use of eye-tracking and perceptual learning methods in medical training to improve diagnostic accuracy. Lim et al [<xref ref-type="bibr" rid="ref15">15</xref>] identified several features that can be extracted from eye-tracking data, including pupil size, saccade, fixations, velocity, blink, pupil position, electrooculogram, and gaze point, to be used in machine learning models. Among these features, fixation was the most commonly used feature in the studies reviewed.</p>
      <p>Shamyuktha et al [<xref ref-type="bibr" rid="ref16">16</xref>] developed a machine learning framework using eye gaze data such as saccade latency and amplitude to classify expert and nonexpert radiologists. Harezlak et al [<xref ref-type="bibr" rid="ref17">17</xref>] investigated eye movement traits to differentiate experts and laymen in a similar study. Akshay et al [<xref ref-type="bibr" rid="ref18">18</xref>] proposed a machine learning algorithm to identify eye movement metrics using raw eye-tracking data. Rizzo et al [<xref ref-type="bibr" rid="ref19">19</xref>] used machine learning to detect cognitive interference based on eye-tracking data. Öder et al [<xref ref-type="bibr" rid="ref20">20</xref>] applied machine learning to classify familiar web users based on eye-tracking data. Indeed, these techniques can be used to enhance competency assessment and feedback techniques in radiologists.</p>
      <p>Eye tracking also holds the potential for understanding the longitudinal aspects of competency progression in medical education, allowing for examining how interpretive and diagnostic skills develop over time. Karargyris et al [<xref ref-type="bibr" rid="ref21">21</xref>] and Bigolin Lanfredi et al [<xref ref-type="bibr" rid="ref22">22</xref>] created and validated chest x-ray datasets with eye-tracking data and report dictation for developing such artificial intelligence systems. These datasets aim to support the research community in developing more complex support tools for radiology research.</p>
      <p>In this study, we use machine learning to compare the discriminability of 2 radiologists of different skill levels using, first, the aforementioned “traditional” gaze-based features (such as time to scan, saccade length, the total number of fixations, and total regressive fixations) [<xref ref-type="bibr" rid="ref11">11</xref>] and second, the “proposed” features that we developed to describe high-dimensional visual search patterns thoroughly and quantitatively. We curate the traditional feature sets to those that could be practically acquired without laborious manual ground truthing of exams, as this would permit large-scale deployment of this technology to health care institutions. To highlight the use of eye-tracking data and artificial intelligence, we term our general approach “biometric radiology artificial intelligence.”</p>
      <p>The driving hypothesis behind the work presented in this paper is that gaze patterns measurably differ among radiologists as a function of their experience level. To test this hypothesis, we proposed a novel discretized feature encoding method that condenses fixation data into a few representative spatiotemporal bins for descriptive and predictive analytics purposes (<xref rid="figure1" ref-type="fig">Figure 1</xref>). With spatiotemporal binning, fixations are divided into a predefined number of temporal segments (bins). Within each temporal bin, the fixations are counted within spatial subdivisions of the image. This process results in a vector that captures detailed and structured information about both where and when fixations occurred. By splitting fixations into temporal bins, we capture the evolution of the visual search process over time, providing insights into how radiologists allocate their attention during different phases of image inspection. Also, spatial binning allows us to understand which regions of the image are being focused on and how frequently. In addition, this method transforms raw fixation data into structured features that can be effectively used by machine learning models.</p>
      <fig id="figure1" position="float">
        <label>Figure 1</label>
        <caption>
          <p>Overall algorithm: the steps required to generate proposed features from the raw dataset and build the proposed machine learning model.</p>
        </caption>
        <graphic xlink:href="formative_v9i1e53928_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
      </fig>
      <p>We collected the gaze fixation data from radiologists while they were reading the x-rays. These data were then segmented into fixed temporal groups before discretizing them to convert them into final encoded vectors. The final encoded features were then used in training machine learning algorithms to classify radiologists.</p>
      <p>Collecting data from 2 participants—1 faculty member (expert) and 1 resident (trainee)—we analyzed their behavior and level of experience using the proposed approach. Using stratified cross-validation over 10 folds, we compared the area under the curve (AUC) performance of several classifiers using the proposed methodology with the AUC performance of those same classifiers when using a traditional feature set (<xref ref-type="table" rid="table1">Table 1</xref>). We then confirmed our results using data from a second similarly designed, larger study evaluating 8 participants—4 faculty members (expert) and 4 residents (trainee). The remainder of the paper is structured as follows: <italic>Methods</italic> presents the data collection and preparation procedures and details of the proposed method; <italic>Results</italic> describes the simulation study and interpretation; and <italic>Discussion</italic> presents the discussion, concluding remarks, and advice to practitioners.</p>
      <table-wrap position="float" id="table1">
        <label>Table 1</label>
        <caption>
          <p>State-of-the-art features from 22 relevant studies.</p>
        </caption>
        <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
          <col width="190"/>
          <col width="400"/>
          <col width="410"/>
          <thead>
            <tr valign="top">
              <td>Attribute (per trial)</td>
              <td>Attribute description</td>
              <td>Association with high level of expertise (percentage of the total number of included studies)</td>
            </tr>
          </thead>
          <tbody>
            <tr valign="top">
              <td>Total time to scan</td>
              <td>Measures the total duration spent scanning the chest x-ray image, indicating the thoroughness of the visual inspection</td>
              <td>Decrease (45.45%)</td>
            </tr>
            <tr valign="top">
              <td>Regressive fixation count</td>
              <td>Counts the number of distinct locations revisited during the scan, suggesting areas of uncertainty or interest</td>
              <td>Increase (4.55%) or decrease (4.55%)</td>
            </tr>
            <tr valign="top">
              <td>Fixation count</td>
              <td>Total number of fixations, reflecting the intensity of the visual scrutiny</td>
              <td>Decrease (18.18%)</td>
            </tr>
            <tr valign="top">
              <td>Total saccade length</td>
              <td>Sum of all saccade lengths, indicating the extent and pattern of the visual search (time between fixations) [<xref ref-type="bibr" rid="ref23">23</xref>] captured in a single chest x-ray scan</td>
              <td>Increase (9.09%) or decrease (4.55%)</td>
            </tr>
            <tr valign="top">
              <td>Coverage</td>
              <td>Percentage of salient regions covered by the gaze, reflecting the comprehensiveness of the examination</td>
              <td>Increase (9.09%) or decrease (9.09%)</td>
            </tr>
          </tbody>
        </table>
      </table-wrap>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Study Design, Data Collection, and Preparation</title>
        <sec>
          <title>Overview</title>
          <p>The study design was prospective, controlled, block-randomized, and Institutional Review Board (IRB) approved. Each study participant completed 4 roughly 1-hour sessions in a radiology reading room, including tutorial, calibration, assessment, and annotation periods. The tutorial included an overview of the assessment period and instructions on how to perform dictation and annotation consistently. Calibration was performed to ensure that recorded and actual gaze were consistent based on a 9-point custom calibration mapping script.</p>
          <p>Nodule and normal cases were derived from the Shiraishi 2000 chest radiograph dataset [<xref ref-type="bibr" rid="ref24">24</xref>], which includes 154 chest radiographs with 5 degrees of subtlety from level 1 (extremely subtle) to level 5 (obvious). Distractor cases were derived from the VinDr chest radiograph dataset [<xref ref-type="bibr" rid="ref25">25</xref>]. A total of 3 sets of 6 nodule cases from the Japanese Society of Radiological Technology dataset, 1 set each from the intermediate difficulty levels (2, 3, and 4), and 1 set of 9 normal cases from the Japanese Society of Radiological Technology dataset were randomly sampled without replacement. In total, 2 cases each of pneumothorax, cardiomegaly, and consolidation from the VinDr dataset were randomly sampled without replacement to serve as distractor cases. These distractor cases functioned mainly to prevent control subject bias to the nodule detection task. Each participant reviewed the exams only once during the trial, and all study participants reviewed the same set of cases.</p>
          <p>A custom software tool was developed to automatically display the study images and capture time-stamped bilateral gaze, bilateral pupil, head pose, voice, annotation, and image display configuration data. No chin rest was used to ensure that the study was performed in a manner that was as close as possible to a clinical setting. After each session, data were transferred to a database for further analysis.</p>
        </sec>
        <sec>
          <title>Data Acquisition</title>
          <p>In the first study, the EyeLink 1000 eye tracker and software were used to collect eye-tracking data [<xref ref-type="bibr" rid="ref26">26</xref>]. A total of 2 participants—1 faculty member (9 years of faculty experience) and 1 resident (3 years of trainee experience)—observed a series of chest x-ray images, which contained a balanced class composition of normal scans (no abnormalities), abnormal scans (mass or nodule present), or abnormal scans with pleural effusion. A total of 110 trials (55 trials were studied by each participant) were recorded. We leveraged the EyeLink suite to remove most artifacts, such as blinks, from the eye-tracking data captured in each participant’s trial and manually filtered remaining artifacts, such as off-screen distractions left unprocessed (eg, the far-displaced fixations in <xref rid="figure2" ref-type="fig">Figure 2</xref>).</p>
          <fig id="figure2" position="float">
            <label>Figure 2</label>
            <caption>
              <p>Example of eye-tracking fixations for 1 trial processed by the EyeLink software. The fixations illustrated include participants 1 (blue) and 2 (red) superimposed on the image displayed during the trial. The “invalid” fixations that were not successfully filtered out are shown as “x” markers and were manually removed during data processing.</p>
            </caption>
            <graphic xlink:href="formative_v9i1e53928_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <p>In the second study, a Tobii 5L eye tracker was used [<xref ref-type="bibr" rid="ref27">27</xref>]. This second dataset included 8 participants (4 faculty with an average of 12.75 years of faculty experience and 4 trainees with an average of 2.25 years of trainee experience), each scanning the same set of 27 images. The Tobii gaze data were unprocessed to evaluate the robustness of the proposed method to fixation postprocessing.</p>
        </sec>
        <sec>
          <title>Common (Traditional) Features</title>
          <p>To establish a necessary baseline to which the proposed methodology can compare, several attributes were established based on a meta-analysis done by van der Gijp et al [<xref ref-type="bibr" rid="ref11">11</xref>] in 2017. The baseline is used for two main reasons: (1) traditional features are well-known and correlate with radiologist expertise, serving as a necessary reference point to evaluate our proposed method&#8217;s effectiveness, and (2) comparing our novel discretized vector encoding method against this baseline demonstrates the added value, improved classification accuracy, and robustness of our new approach. We separated those features based on whether they required ground truthing of exams. However, ground truthing of medical exams is costly and time-consuming as it involves manual annotation by experts, which also limits the scalability of the method. In addition, reliance on ground truth annotations can introduce biases and errors, as the annotations themselves might vary between experts, which limits applicability and transferability to real-world applications. Consequently, features that required knowledge of the image abnormalities&#8217; ground truth location (ie, area of interest) were removed: fixation duration on the area of interest, number of fixations on the area of interest, and the time between trial start and the first fixation on the area of interest. This ensures that the proposed approach can be more easily and widely applied in clinical settings without the need for extensive preparatory work. It also helps mitigate potential variability and subjectivity in the training data, leading to more robust and generalizable models. Furthermore, by excluding features tied to known abnormality locations, we can better capture inherent differences in visual search patterns between experienced and less experienced radiologists.</p>
          <p><xref ref-type="table" rid="table1">Table 1</xref> summarizes the remaining attribute names, descriptions, and expected association with levels of expertise. All features were used as originally defined except for coverage. Salient regions refer to areas of an image that are not part of a peripheral black background. This is typically necessary because users may be viewing scans with different amounts of background area. As noted previously, we used the Tobii gaze data without fixation postprocessing. For evaluating traditional features using fixations in the Tobii dataset, we substituted raw gaze data with fixation data. For purposes of clarity and brevity, we use fixations and gaze interchangeably for the remainder of the paper.</p>
        </sec>
      </sec>
      <sec>
        <title>Proposed Approach: Discretized Vector Encoding for Fixation Data</title>
        <p>Here, we describe the proposed method for directly using the fixation patterns as an alternative approach to using the current and previously described attributes in <xref ref-type="table" rid="table1">Table 1</xref>. The proposed strategy aims to extract information from fixations in the following 2 ways: first, geometric alignment: this involves mapping the coordinates of eye fixations on the chest x-ray images into a Cartesian grid. Each fixation is assigned to a specific grid cell based on its position, such as the Cartesian locations of the fixations when displayed on a chest x-ray image. Second, temporal order in which the fixations appear: the order in which fixations occur is crucial. Fixations are split into temporal bins, preserving the sequence of visual inspection. For each trial with recorded fixation data, we split the fixations into <italic>t</italic> number of temporal bins (each bin covers “total time divided by the number of bins in seconds”) or groups before counting the number of fixations captured within square grids or subdivisions of size <italic>x</italic>. Then, the <italic>t</italic> number of <italic>x-</italic>by<italic>-x</italic> grids is encoded into a single vector of size <italic>1</italic>-by<italic>-(x×x×t).</italic> The overall procedure is described in pseudocode in algorithm 1 (<xref ref-type="boxed-text" rid="box1">Textbox 1</xref>) and illustrated in <xref rid="figure3" ref-type="fig">Figure 3</xref>.</p>
        <boxed-text id="box1" position="float">
          <title>Discrete vector encoding for fixation data.</title>
          <p>
            <bold>Algorithm 1: Vector encoding for fixation data</bold>
          </p>
          <list list-type="bullet">
            <list-item>
              <p>Input</p>
            </list-item>
            <list-item>
              <p>n-Fixation coordinates of a single trial, F[n×2], number of x- and y-axis subdivisions, (x,y), number of temporal groups, (t)</p>
            </list-item>
            <list-item>
              <p>Output</p>
            </list-item>
            <list-item>
              <p>Encoded vector, V[1×(t×x×y)]</p>
            </list-item>
            <list-item>
              <p>Initialize</p>
            </list-item>
            <list-item>
              <p>Create array A[t×x×y] and centroids C[(x×y)×2] corresponding to the center of each grid subdivision (defined by second and third indices of A)</p>
            </list-item>
            <list-item>
              <p>Evenly split fixations into t-groups, T = ([F1, F2, ⋯]1, ⋯, [⋯, F(n-1) Fn ]t)</p>
            </list-item>
            <list-item>
              <p>Procedure</p>
            </list-item>
            <list-item>
              <p>For <italic>i</italic> = 1 → <italic>t</italic> do:</p>
              <p><italic>f</italic> = <italic>T</italic><sub>i</sub></p>
              <p>For <italic>j</italic> = 1 → <italic>len</italic> (<italic>f</italic>) do:</p>
              <p><italic>C</italic><sup>*</sup>=<italic>argmin</italic> (&#124;&#124; <italic>C</italic>-<italic>f</italic><sub>j</sub> &#124;&#124;)</p>
              <p><italic>A</italic> [i, <italic>C</italic><sub>x</sub><sup>*</sup>, <italic>C</italic><sub>y</sub><sup>*</sup>] += 1</p>
              <p><italic>V</italic> = <italic>vec</italic>(<italic>A</italic>)</p>
              <p>Return <italic>V</italic></p>
            </list-item>
          </list>
        </boxed-text>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Proposed discretized vector encoding for fixation data. Bins 1, 2, and 3 capture fixations in a preserved spatial dimension across different temporal windows. Each row represents a temporal bin, and within each bin, the chest x-ray image is divided into spatial grids. The fixations are counted within each grid cell, providing a detailed representation of the radiologist’s visual search pattern over time.</p>
          </caption>
          <graphic xlink:href="formative_v9i1e53928_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>In <xref rid="figure3" ref-type="fig">Figure 3</xref>, participant 1 inspects a single chest x-ray image; the processed fixations are captured as illustrated gold squares with red edges, with the first fixation labeled as a blue cross and the last fixation visualized as a magenta star. In this example, the fixations are split into 3 temporal segments (Step 1), in which 3-by-3 grids count the number of fixations within them (Step 2). Then, the proposed algorithm outputs the final encoding vector as the flattening and concatenation of the set of 3-by-3 grids (Step 3). For a given (square) grid of size <italic>x</italic> and <italic>t</italic> number of temporal segments, the final yielded output vector is of length <italic>x</italic>×<italic>x</italic>×<italic>t</italic>, regardless of trial temporal duration. Segmenting the raw data into fixed temporal segments is one of the benefits of this approach and a strategy developed and imposed to generate consistent numbers of variables on the encoding output across different trials. As the number of fixations across each trial can vary between participants, fixing the number of temporal segments allows the capturing of trial duration while conforming to a prescribed number of grid subdivisions and temporal groups. For example, with <xref rid="figure3" ref-type="fig">Figure 3</xref> as a reference, the final encoded vector will yield a vector with larger values therein for longer trials and yield a sparse vector (with lower values or values of zero) for shorter trials. Users can increase the fidelity of the grid and the number of time groups to represent a continuous spatiotemporal domain more closely. It is notable that the proposed methodology possesses the capability for tensor configuration for use in deep learning architecture by using <italic>t</italic> layers of grids. This tensor configuration is not studied in the paper due to the small sample size. The introduced technique is designed with more accessible or simpler classifiers in mind.</p>
        <p>
          <bold>Performance Metric and Simulation Setup</bold>
        </p>
        <p>To evaluate the discriminability of participants using the proposed approach, we use stratified k-fold cross-validation to calculate the AUC metric for several classification models, where each of the folds contains 5 trials from both levels of experience as the hold-out set. The study was performed on the data acquired by the EyeLink and Tobii equipment separately, and the following sections will contain an elaboration on their respective results. We performed cross-validations on a full-factorial configuration of 5, 7, 10, and 15 square grid subdivisions and 3, 5, 10, and 20 temporal groupings and selected the settings for each classifier that yielded the best results. In the presentation of these results, the average scores were calculated by computing the AUC metrics at the lowest level (data acquisition method, classifier, data type, feature extraction method, grid-size, temporal-group, and cross-validation seed) and averaged to the presented levels of granularity. Given the small sample size of 110 (EyeLink dataset) and 216 (Tobii dataset) trials, and high dimensionality in the chosen configurations (up to 4500 encoded variables in our study), there are available pathways that we have used to alleviate the effects of the curse of dimensionality [<xref ref-type="bibr" rid="ref28">28</xref>], such as principal component analysis (PCA) [<xref ref-type="bibr" rid="ref29">29</xref>] and kernel principal component analysis (KPCA) [<xref ref-type="bibr" rid="ref30">30</xref>]. The feature extraction and dimensionality reduction methods used include reducing the input data to 2 dimensions (with varying amounts of explained variance) and fixing the amount of variance explained to 50%, 90%, and 99% (with varying numbers of dimensions). These techniques were used not only to reduce the density of the data but also to introduce an additional preprocessing step that leverages the spectral decomposition of data collected from each participant.</p>
        <p>Some of the major reasons for considering PCA and KPCA instead of the other alternatives include the following: PCA and KPCA are among the most popular methods of dimensionality reduction; most technical practitioners, especially in the field of medicine, are familiar with PCA and KPCA; PCA and KPCA have rigorous mathematical properties and are commonly used baseline methods in statistical analysis; and PCA and KPCA have relatively low computational complexity compared with many of the other shallow and deep alternatives.</p>
        <p>All the codes were written using Python. The used libraries and versions are as follows: <italic>matplotlib</italic> (3.7.1), <italic>seaborn</italic> (0.12.2), <italic>tqdm</italic> (4.65.0), <italic>scipy</italic> (1.8.0), <italic>scikit-learn</italic> (1.0.2), <italic>xgboost</italic> (1.7.5), <italic>GPy</italic> (1.10.0), <italic>numpy</italic> (1.21.6), <italic>pandas</italic> (2.0.1), and <italic>joblib</italic> (1.2.0).</p>
      </sec>
      <sec>
        <title>Ethical Considerations</title>
        <p>This study was conducted in full compliance with human participant research ethics and was reviewed and approved by the University of Texas Health San Antonio Institutional Review Board (20190533HU). All participants were fully informed about the purpose and procedures of the study, and informed consent was obtained before their inclusion. To ensure the privacy and confidentiality of participant data, all identifying information was removed to anonymize the dataset before analysis. Furthermore, participants were compensated $400 USD for their time and involvement in the study.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Competing Algorithms and Training</title>
        <p>In this study, we use the Gaussian process, logistic regression, and k-nearest neighbors classifiers from the <italic>Scikit-learn</italic> [<xref ref-type="bibr" rid="ref31">31</xref>] package; the extreme gradient boosting (XGBoost) [<xref ref-type="bibr" rid="ref32">32</xref>] tree-based ensemble classifier; and a modified AlexNet [<xref ref-type="bibr" rid="ref33">33</xref>] deep learning classifier. The <italic>Scikit-learn</italic> classifiers were selected for their accessibility to users, while the XGBoost and AlexNet-like neural networks were chosen as more complex classifiers. The logistic regression, k-nearest neighbors, and XGBoost classifiers used <italic>Scikit-learn</italic>’s <italic>StratifiedKFold</italic> and <italic>GridSearchCV</italic> packages to train on the balanced accuracy loss function (also defined by <italic>Scikit-learn</italic>), while the Gaussian process methodology used Laplace approximation as detailed in their documentation [<xref ref-type="bibr" rid="ref31">31</xref>]. Finally, the AlexNet-like classifier used sparse categorical cross-entropy [<xref ref-type="bibr" rid="ref34">34</xref>] for training.</p>
      </sec>
      <sec>
        <title>EyeLink Dataset</title>
        <p><xref rid="figure4" ref-type="fig">Figure 4</xref> illustrates the average AUC across each classifier tasked with distinguishing between 2 participants (particularly between 2 levels of experience) using either the traditional or the proposed encoded data types (features). Along with the original data types, we include the average AUC of the classifiers based on the usage of select feature extraction configurations. The encoded features extracted from the raw dataset, shown on the left, illustrate a consistently high AUC score compared with the traditional features shown on the right, implying that the model performance for each classifier (except for certain feature extraction configurations of the AlexNet model) has high discriminatory power under optimal spatiotemporal encoding settings. </p>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>Numerical study results on the area under the curve metric reported for each classifier when consuming the EyeLink dataset, organized by the aggregated average of classifier, data type, and select feature extraction levels using the original dataset, principal component analysis (PCA), and kernel principal component analysis (KPCA). Alex: AlexNet-like neural network classifier; Average: average of all classifiers; GP: Gaussian process; KNN: k-nearest neighbors; LR: logistic regression; XGBoost: extreme gradient boosting.</p>
          </caption>
          <graphic xlink:href="formative_v9i1e53928_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>We also present the AUC metrics from <xref rid="figure4" ref-type="fig">Figure 4</xref> below in <xref ref-type="table" rid="table2">Table 2</xref>. The performance of the classifiers using the encoded data type consistently yielded higher discriminatory power than those using the traditional data type across all feature extraction methods. Encoding the fixation data into the proposed spatiotemporal elements provides more information each classifier can use to determine the experience level of a given participant more effectively than using traditional attributes. This table illustrates the original encoded data to possess the highest performance, with AUC scores consistently above 0.98 across all classifiers. However, usage of the traditional data yields roughly 0.522 at worst, as seen in the reported results for the AlexNet classifier. This trend of encoded data providing better results is also seen when using feature extraction; although a performance decrease is observable when reducing dimensions either through an information covariance matrix (PCA) or spatial relation (KPCA), the use of encoded data still outperforms those corresponding to the use of the traditional data. This suggests that the loss in information due to dimensionality reduction can be considered negligible in light of the benefits of using spatiotemporal encoding. The lower relative performance of the AlexNet-like classifier is likely caused by the number of training samples available in this study. The report on AUC in the table for the classifier is higher for the encoded data type, where it is observable that using the data without dimensionality reduction provides the best performance. 
This effect has been studied in Sumner and Alaeddini [<xref ref-type="bibr" rid="ref35">35</xref>], in which it is shown that neural networks already perform feature extraction in each layer; this supports the results reported here, as (besides the small dataset) performing feature extraction beforehand may not provide enough information for the network to use its architecture to its fullest potential.</p>
        <table-wrap position="float" id="table2">
          <label>Table 2</label>
          <caption>
            <p>Numerical tabulation of area under the curve scores across each classifier and data type and select feature extraction methods.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="240"/>
            <col width="140"/>
            <col width="200"/>
            <col width="130"/>
            <col width="130"/>
            <col width="130"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Feature extraction method and data type</td>
                <td colspan="5">Classifier for the EyeLink dataset, AUC<sup>a</sup></td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <break/>
                </td>
                <td>AlexNet</td>
                <td>Gaussian process</td>
                <td>KNN<sup>b</sup></td>
                <td>LR<sup>c</sup></td>
                <td>XGBoost<sup>d</sup></td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="7">
                  <bold>KPCA<sup>e</sup> (2D)</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Encoded</td>
                <td>0.801</td>
                <td>1.000</td>
                <td>0.980</td>
                <td>1.000</td>
                <td>0.980</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Traditional</td>
                <td>0.336</td>
                <td>0.978</td>
                <td>0.967</td>
                <td>0.970</td>
                <td>0.914</td>
              </tr>
              <tr valign="top">
                <td colspan="7">
                  <bold>KPCA (50%)</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Encoded</td>
                <td>0.501</td>
                <td>1.000</td>
                <td>0.996</td>
                <td>1.000</td>
                <td>0.980</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Traditional</td>
                <td>0.292</td>
                <td>0.978</td>
                <td>0.967</td>
                <td>0.970</td>
                <td>0.914</td>
              </tr>
              <tr valign="top">
                <td colspan="7">
                  <bold>Original</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Encoded</td>
                <td>0.985</td>
                <td>1.000</td>
                <td>1.000</td>
                <td>1.000</td>
                <td>0.991</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Traditional</td>
                <td>0.522</td>
                <td>1.000</td>
                <td>0.900</td>
                <td>1.000</td>
                <td>0.914</td>
              </tr>
              <tr valign="top">
                <td colspan="7">
                  <bold>PCA<sup>f</sup> (2D)</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Encoded</td>
                <td>0.818</td>
                <td>0.978</td>
                <td>0.991</td>
                <td>1.000</td>
                <td>0.991</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Traditional</td>
                <td>0.500</td>
                <td>0.866</td>
                <td>0.870</td>
                <td>0.806</td>
                <td>0.830</td>
              </tr>
              <tr valign="top">
                <td colspan="7">
                  <bold>PCA (50%)</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Encoded</td>
                <td>0.611</td>
                <td>1.000</td>
                <td>0.991</td>
                <td>1.000</td>
                <td>0.991</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Traditional</td>
                <td>0.500</td>
                <td>0.866</td>
                <td>0.870</td>
                <td>0.806</td>
                <td>0.830</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table2fn1">
              <p><sup>a</sup>AUC: area under the curve.</p>
            </fn>
            <fn id="table2fn2">
              <p><sup>b</sup>KNN: k-nearest neighbor.</p>
            </fn>
            <fn id="table2fn3">
              <p><sup>c</sup>LR: logistic regression.</p>
            </fn>
            <fn id="table2fn4">
              <p><sup>d</sup>XGBoost: extreme gradient boosting.</p>
            </fn>
            <fn id="table2fn5">
              <p><sup>e</sup>KPCA: kernel principal component analysis.</p>
            </fn>
            <fn id="table2fn6">
              <p><sup>f</sup>PCA: principal component analysis.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <p>By using the encoded vectors for classification, differences in eye-tracking patterns can more consistently be distinguished between the 2 participants. <xref rid="figure5" ref-type="fig">Figures 5</xref> and <xref rid="figure6" ref-type="fig">6</xref> illustrate one such difference in search pattern behavior. The more experienced participant (participant 1, <xref rid="figure5" ref-type="fig">Figure 5</xref>) shows a more uniformly distributed search pattern across the chest x-ray. In contrast, the less-experienced participant (participant 2, <xref rid="figure6" ref-type="fig">Figure 6</xref>) focuses on regions where they suspect abnormalities. It is clear from a visual inspection that the behavior between these participants is markedly different and using the correct spatiotemporal configurations to capture the differences between the 2 participant’s behavior by leveraging the proposed methodology (as reported numerically in <xref ref-type="table" rid="table2">Table 2</xref>) provides a consistent improvement of classification accuracy.</p>
        <fig id="figure5" position="float">
          <label>Figure 5</label>
          <caption>
            <p>Scan of chest x-ray by participant 1 (faculty).</p>
          </caption>
          <graphic xlink:href="formative_v9i1e53928_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure6" position="float">
          <label>Figure 6</label>
          <caption>
            <p>Scan of chest x-ray by participant 2 (trainee).</p>
          </caption>
          <graphic xlink:href="formative_v9i1e53928_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Tobii Dataset</title>
        <p>We have performed the same analysis on the data acquired using Tobii eye-tracking equipment. It is notable that although the AUC scores from the EyeLink dataset are consistently high, one would naturally anticipate more variation in classifier performance when more individual participants (classified as either a more-experienced faculty or less-experienced trainee) are introduced to the study. <xref rid="figure7" ref-type="fig">Figure 7</xref> illustrates a report on AUC in a similar fashion to that in <xref rid="figure4" ref-type="fig">Figure 4</xref>, with lower scores across all classifying models for both data types. As seen in <xref rid="figure4" ref-type="fig">Figure 4</xref>, <xref rid="figure7" ref-type="fig">Figure 7</xref> also suggests that the best performance for the encoded data on average is attained when using it without feature extraction, although, for several cases, we can observe that some form of feature extraction yields better results than their respective traditional dataset counterparts.</p>
        <fig id="figure7" position="float">
          <label>Figure 7</label>
          <caption>
            <p>Numerical study results on the area under the curve metric reported for each classifier when consuming the Tobii dataset, organized by the aggregated average of classifier, data type, and selected feature extraction levels using the original dataset, principal component analysis (PCA), and kernel principal component analysis (KPCA). Alex: AlexNet-like neural network classifier; Average: average of all classifiers; GP: Gaussian process; KNN: k-nearest neighbors; LR: logistic regression; XGBoost: extreme gradient boosting.</p>
          </caption>
          <graphic xlink:href="formative_v9i1e53928_fig7.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>When inspecting <xref ref-type="table" rid="table3">Table 3</xref>, we can numerically inspect the average (and variance of) AUC, <italic>F</italic><sub>1</sub>-score, accuracy, specificity, and sensitivity of each classifier when consuming each data type in both datasets. Within the Tobii dataset, the encoded data type generally outperformed (shown in italics) the traditional data type across most metrics and models. Although the encoded data type that was consumed within the Tobii dataset possessed more discriminatory capability than that in the traditional data, the performance gap was less pronounced than those observable in the EyeLink dataset. For example, the Tobii average (and variance) AUC scores for the encoded data type ranged from 0.55 (0.05) to 0.82 (0.04), while the traditional data type ranged from 0.51 (0.07) to 0.76 (0.05), and the EyeLink average (variance) AUC scores for the same data types ranged from 0.63 (0.07) to 1.0 (0.0) and from 0.52 (0.08) to 0.96 (0.01), respectively. There is a consistent trend across datasets supporting that the encoded data are capable of providing higher values of accuracy and performance; the <italic>F</italic><sub>1</sub>-score for the Gaussian process, k-nearest neighbors, logistic regression, and XGBoost were consistently higher when using the encoded data than when using traditional attributes in classification. This highlights the ability of the proposed encoding procedure to improve the balance between precision and recall in the classifiers and, as a result, the overall effectiveness of each model’s predictions. In terms of specificity, the encoded data type is also shown to have a competitive edge in boosting a classifier’s ability to correctly identify true negative class labels (experienced participants). 
As seen in <xref ref-type="table" rid="table3">Table 3</xref>, the average range of improvement lies between 0.01 to 0.04 for the EyeLink dataset and between –0.2 to +0.15 for the Tobii dataset; the negative value of the improvement is seen with the AlexNet-like model, which, as explained before, may have difficulty fitting well for classification on small datasets, made more difficult by the variation in subconscious behavior between participants that are recorded in spatiotemporal encodings by the proposed methodology.</p>
        <table-wrap position="float" id="table3">
          <label>Table 3</label>
          <caption>
            <p>Numerical tabulation of mean and variance area under the curve, <italic>F</italic><sub>1</sub>-score, accuracy, specificity, and sensitivity across each data acquisition method, classifier, and data type.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="210"/>
            <col width="200"/>
            <col width="200"/>
            <col width="0"/>
            <col width="180"/>
            <col width="180"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Metric and classifier</td>
                <td colspan="5">Data acquisition system</td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="3">EyeLink</td>
                <td colspan="2">Tobii</td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <break/>
                </td>
                <td colspan="3">Data type</td>
                <td colspan="2">Data type</td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <break/>
                </td>
                <td>Encoded, average (SD)</td>
                <td>Traditional, average (SD)</td>
                <td colspan="2">Encoded, average (SD)</td>
                <td>Traditional, average (SD)</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="7">
                  <bold>Area under the curve</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>AlexNet</td>
                <td>
                  <italic>0.63 (0.07)</italic>
                  <sup>a</sup>
                </td>
                <td>0.52 (0.08)</td>
                <td colspan="2">
                  <italic>0.55 (0.05)</italic>
                </td>
                <td>0.51 (0.07)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>GP<sup>b</sup></td>
                <td>
                  <italic>1 (0)</italic>
                </td>
                <td>0.96 (0.01)</td>
                <td colspan="2">
                  <italic>0.82 (0.04)</italic>
                </td>
                <td>0.76 (0.05)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>KNN<sup>c</sup></td>
                <td>
                  <italic>0.96 (0.01)</italic>
                </td>
                <td>0.93 (0.01)</td>
                <td colspan="2">0.73 (0.05)</td>
                <td>
                  <italic>0.74 (0.04)</italic>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>LR<sup>d</sup></td>
                <td>
                  <italic>1 (0)</italic>
                </td>
                <td>0.93 (0.01)</td>
                <td colspan="2">
                  <italic>0.82 (0.04)</italic>
                </td>
                <td>0.71 (0.08)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>XGBoost<sup>e</sup></td>
                <td>
                  <italic>0.97 (0.0)</italic>
                </td>
                <td>0.91 (0.01)</td>
                <td colspan="2">
                  <italic>0.73 (0.05)</italic>
                </td>
                <td>0.71 (0.05)</td>
              </tr>
              <tr valign="top">
                <td colspan="7">
                  <bold><italic>F</italic><sub>1</sub>-score</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>AlexNet</td>
                <td>
                  <italic>0.43 (0.1)</italic>
                </td>
                <td>0.41 (0.09)</td>
                <td colspan="2">
                  <italic>0.39 (0.1)</italic>
                </td>
                <td>0.23 (0.1)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>GP</td>
                <td>
                  <italic>0.98 (0)</italic>
                </td>
                <td>0.90 (0.03)</td>
                <td colspan="2">
                  <italic>0.73 (0.07)</italic>
                </td>
                <td>0.71 (0.05)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>KNN</td>
                <td>
                  <italic>0.90 (0.03)</italic>
                </td>
                <td>0.88 (0.02)</td>
                <td colspan="2">0.61 (0.1)</td>
                <td>
                  <italic>0.71 (0.04)</italic>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>LR</td>
                <td>
                  <italic>0.98 (0)</italic>
                </td>
                <td>0.82 (0.04)</td>
                <td colspan="2">
                  <italic>0.74 (0.06)</italic>
                </td>
                <td>0.58 (0.09)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>XGBoost</td>
                <td>
                  <italic>0.96 (0)</italic>
                </td>
                <td>0.87 (0.02)</td>
                <td colspan="2">
                  <italic>0.68 (0.07)</italic>
                </td>
                <td>0.67 (0.05)</td>
              </tr>
              <tr valign="top">
                <td colspan="7">
                  <bold>Accuracy</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>AlexNet</td>
                <td>
                  <italic>0.52 (0.02)</italic>
                </td>
                <td>0.49 (0.04)</td>
                <td colspan="2">
                  <italic>0.51 (0.0)</italic>
                </td>
                <td>0.5 (0.01)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>GP</td>
                <td>
                  <italic>0.99 (0.0)</italic>
                </td>
                <td>0.93 (0.01)</td>
                <td colspan="2">
                  <italic>0.76 (0.03)</italic>
                </td>
                <td>0.7 (0.03)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>KNN</td>
                <td>
                  <italic>0.93 (0.01)</italic>
                </td>
                <td>0.9 (0.01)</td>
                <td colspan="2">0.69 (0.03)</td>
                <td>
                  <italic>0.7 (0.02)</italic>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>LR</td>
                <td>
                  <italic>0.98 (0.0)</italic>
                </td>
                <td>0.88 (0.01)</td>
                <td colspan="2">
                  <italic>0.77 (0.03)</italic>
                </td>
                <td>0.64 (0.03)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>XGBoost</td>
                <td>
                  <italic>0.96 (0.0)</italic>
                </td>
                <td>0.9 (0.01)</td>
                <td colspan="2">
                  <italic>0.72 (0.03)</italic>
                </td>
                <td>0.67 (0.03)</td>
              </tr>
              <tr valign="top">
                <td colspan="7">
                  <bold>Specificity</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>AlexNet</td>
                <td>
                  <italic>0.43 (0.22)</italic>
                </td>
                <td>0.42 (0.22)</td>
                <td colspan="2">0.46 (0.16)</td>
                <td>
                  <italic>0.66 (0.12)</italic>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>GP</td>
                <td>
                  <italic>0.98 (0)</italic>
                </td>
                <td>0.97 (0.01)</td>
                <td colspan="2">
                  <italic>0.80 (0.04)</italic>
                </td>
                <td>0.65 (0.07)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>KNN</td>
                <td>
                  <italic>0.97 (0.01)</italic>
                </td>
                <td>0.93 (0.01)</td>
                <td colspan="2">
                  <italic>0.76 (0.06)</italic>
                </td>
                <td>0.67 (0.04)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>LR</td>
                <td>
                  <italic>0.99 (0.0)</italic>
                </td>
                <td>0.97 (0.01)</td>
                <td colspan="2">
                  <italic>0.81 (0.04)</italic>
                </td>
                <td>0.67 (0.08)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>XGBoost</td>
                <td>
                  <italic>0.95 (0.01)</italic>
                </td>
                <td>0.92 (0.01)</td>
                <td colspan="2">
                  <italic>0.76 (0.05)</italic>
                </td>
                <td>0.63 (0.08)</td>
              </tr>
              <tr valign="top">
                <td colspan="7">
                  <bold>Sensitivity</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>AlexNet</td>
                <td>
                  <italic>0.64 (0.22)</italic>
                </td>
                <td>0.57 (0.2)</td>
                <td colspan="2">
                  <italic>0.56 (0.17)</italic>
                </td>
                <td>0.32 (0.12)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>GP</td>
                <td>
                  <italic>0.99 (0)</italic>
                </td>
                <td>0.89 (0.04)</td>
                <td colspan="2">0.72 (0.07)</td>
                <td>
                  <italic>0.76 (0.05)</italic>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>KNN</td>
                <td>
                  <italic>0.89 (0.05)</italic>
                </td>
                <td>0.87 (0.03)</td>
                <td colspan="2">0.61 (0.1)</td>
                <td>
                  <italic>0.74 (0.04)</italic>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>LR</td>
                <td>
                  <italic>0.98 (0.01)</italic>
                </td>
                <td>0.77 (0.06)</td>
                <td colspan="2">
                  <italic>0.72 (0.06)</italic>
                </td>
                <td>0.6 (0.09)</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>XGBoost</td>
                <td>
                  <italic>0.98 (0)</italic>
                </td>
                <td>0.86 (0.03)</td>
                <td colspan="2">0.68 (0.07)</td>
                <td>
                  <italic>0.71 (0.05)</italic>
                </td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table3fn1">
              <p><sup>a</sup>Superior values are italicized.</p>
            </fn>
            <fn id="table3fn2">
              <p><sup>b</sup>GP: Gaussian process.</p>
            </fn>
            <fn id="table3fn3">
              <p><sup>c</sup>KNN: k-nearest neighbor.</p>
            </fn>
            <fn id="table3fn4">
              <p><sup>d</sup>LR: logistic regression.</p>
            </fn>
            <fn id="table3fn5">
              <p><sup>e</sup>XGBoost: extreme gradient boosting.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>In this study, we demonstrated the capacity of eye-tracking technology, combined with machine learning algorithms, to discriminate between radiologists’ experience levels. For this purpose, we developed a novel feature encoding technique where fixations are first spatially arranged according to their Cartesian coordinates on chest x-ray images and temporally ordered. The fixations are then subdivided into predefined temporal bins, and within each bin, we count the number of eye fixations within each subdivision. These counted bins are then concatenated to form a vector encoding to be used as feature input for machine learning algorithms. Our experiments showed that the discretized vector encoding creates discriminative features that are not captured by conventional metrics. Using the encoding approach allows classifiers to better distinguish between participants in terms of experience level, which highlights performance gains (when compared with using traditional features for discrimination) of 6.9%, 7.11%, 9.14%, 9.59%, and 5.61% for AUC, accuracy, <italic>F</italic><sub>1</sub>-score, sensitivity, and specificity, respectively, aggregated across both EyeLink and Tobii datasets in <xref ref-type="table" rid="table3">Table 3</xref>. The Tobii dataset exhibits a lower performance gain (6.41%, 7.48%, 8.62%, 5.11%, and 9.45%) than observed using the EyeLink dataset (7.29%, 6.83%, 9.54%, and 13.13%) due to using a more diverse roster of participants; however, the trend in using the proposed eye-tracking encoding approach possessing the competitive edge is still present, highlighting the effectiveness of spatiotemporal assortment in the introduced method. These results validate our initial hypothesis that when appropriately encoded, eye-tracking data can provide nuanced insights into the difference between radiologists’ expertise levels.</p>
        <p>We can also observe the perceptual strategies radiologists use during diagnostic evaluations. Previous research has often focused on more general eye-tracking metrics without leveraging the full potential of machine learning to analyze the data. For example, studies by van der Gijp et al [<xref ref-type="bibr" rid="ref11">11</xref>] and Waite et al [<xref ref-type="bibr" rid="ref3">3</xref>] explored how visual search patterns correlate with diagnostic accuracy and expertise. With the help of the proposed encoding method, such machine learning models can be developed to determine expertise level and have the potential to identify and track potential features from eye fixations or gaze fixations.</p>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>While our study has shown promising results and potential benefits, it is important to acknowledge limitations that may have a degree of effect on our findings. One such limitation is sample size; across both the EyeLink and Tobii datasets, there were 2 participants in one study (EyeLink) and 8 participants in the other (Tobii), with both containing small numbers of images scanned by each participant. Another condition involves the variation in data acquisition. In total, 2 different eye-tracking devices (EyeLink and Tobii) were used for data collection, and while serving the same overarching purpose of collecting data, some additional variability in the findings is notably attributed to the usage of 2 different hardware-software configurations. Another important consideration includes the difference in traditional feature sets between the EyeLink and Tobii datasets. Coupled with data acquisition differences, some features from the EyeLink software were not congruent with the Tobii dataset, such as the usage of fixations (EyeLink) versus gaze (Tobii). When applying the encoding approach to these datasets, the Tobii dataset had larger yielded values in each output vector. This did not affect the results substantially; however, it underscores the challenge of directly comparing data from 2 sources. One final consideration was our decision to remove certain metrics related to the location of abnormalities in chest x-rays as features in the traditional data type during performance evaluation. For example, we did not consider the time to first fixation on the region of abnormality. This and other like attributes possess statistical significance in previous works; however, their inclusion necessitates extensive labeling, validation, and other processing in order to establish ground truth information for each image scanned by each participant.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>Despite the limitations above, this study holds significant promise and offers a range of benefits worthy of attention and consideration for use. By extracting spatiotemporal features from eye-tracking data, the proposed approach has demonstrated the capacity to differentiate users based on eye-tracking behavior alone instead of traditional methods and can be extended for use in fields ranging from medical to educational. The approach enables discriminability between users and offers a pathway to gaining deeper insights into generalized differences between levels of expertise. By exploring these pathways, this approach holds the potential to establish more effective educational programs that can assist users in optimizing their search patterns. Respective to the study conducted, by providing support to radiologists to find abnormalities quickly and accurately in chest x-rays, our approach seeks to reduce perceptual errors in medical diagnoses. In fields where the development of unique and precise search patterns is important, the proposed approach offers a valuable source of knowledge transfer. Transmission of expertise from more-experienced individuals to less-experienced individuals can be facilitated and result in increased streamlining during the learning process and yield more efficient and accurate search patterns. The potential benefits can apply to professionals and trainees or students alike.</p>
        <p>In summary, we have shown the potential for spatiotemporal features extracted from eye-tracking data to be useful in discriminating between radiologists of different skill levels and opening the door to improving education. We plan to augment this research by increasing the number of radiologists to demonstrate generalizability and exploring additional types of spatiotemporal analyses. The implications of our findings extend beyond radiology, suggesting that similar methodologies could revolutionize training and assessment protocols in various fields that rely on visual cognition like aviation and ground transportation. Further research could explore the integration of these techniques into real-time training tools, potentially transforming educational paradigms in professions requiring visual expertise.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AUC</term>
          <def>
            <p>area under the curve</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">IRB</term>
          <def>
            <p>institutional review board</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">PCA</term>
          <def>
            <p>principal component analysis</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">KPCA</term>
          <def>
            <p>kernel principal component analysis</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">XGBoost</term>
          <def>
            <p>extreme gradient boosting</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This research is funded by San Antonio Medical Foundation, and principal investigators KC (University of Texas Health Science Center at San Antonio) and EJG (University of Texas at San Antonio).</p>
    </ack>
    <notes>
      <sec>
        <title>Data Availability</title>
        <p>Data collected and analyzed in this study are available upon request. Interested researchers can obtain the data by completing a data request form, which includes their planned project outline and the details of the person responsible for handling the data. This process ensures that the data are used responsibly and for scientifically valid purposes. The request forms are reviewed on a case-by-case basis to ensure alignment with ethical and research standards. Images and annotations were obtained from the National Institutes of Health Clinical Center and can be downloaded on the web [<xref ref-type="bibr" rid="ref36">36</xref>]. The Institutional Review Board restricts sharing of the gaze data. In case further information is needed, please contact KC (clarkkl@uthscsa.edu).</p>
      </sec>
    </notes>
    <fn-group>
      <fn fn-type="conflict">
        <p>KC holds equity in Zauron Labs Inc and NC is a co-owner of Zauron Labs.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="web">
          <article-title>An update on cancer deaths in the United States</article-title>
          <source>Centers for Disease Control and Prevention</source>
          <year>2022</year>
          <month>2</month>
          <day>28</day>
          <access-date>2022-02-28</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://stacks.cdc.gov/view/cdc/119728">https://stacks.cdc.gov/view/cdc/119728</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="web">
          <article-title>Screening for lung cancer: U.S. Preventive Services Task Force recommendation statement</article-title>
          <source>Lung Cancer Research Foundation</source>
          <year>2014</year>
          <access-date>2024-09-23</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.lungcancerresearchfoundation.org/screening-for-lung-cancer-u-s-preventive-services-task-force-recommendation-statement/">https://www.lungcancerresearchfoundation.org/screening-for-lung-cancer-u-s-preventive-services-task-force-recommendation-statement/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Waite</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Grigorian</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Alexander</surname>
              <given-names>RG</given-names>
            </name>
            <name name-style="western">
              <surname>Macknik</surname>
              <given-names>SL</given-names>
            </name>
            <name name-style="western">
              <surname>Carrasco</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Heeger</surname>
              <given-names>DJ</given-names>
            </name>
            <name name-style="western">
              <surname>Martinez-Conde</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Analysis of perceptual expertise in radiology - current knowledge and a new perspective</article-title>
          <source>Front Hum Neurosci</source>
          <year>2019</year>
          <volume>13</volume>
          <fpage>213</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/31293407"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fnhum.2019.00213</pub-id>
          <pub-id pub-id-type="medline">31293407</pub-id>
          <pub-id pub-id-type="pmcid">PMC6603246</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bruno</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Walker</surname>
              <given-names>EA</given-names>
            </name>
            <name name-style="western">
              <surname>Abujudeh</surname>
              <given-names>HH</given-names>
            </name>
          </person-group>
          <article-title>Understanding and confronting our mistakes: the epidemiology of error in radiology and strategies for error reduction</article-title>
          <source>Radiographics</source>
          <year>2015</year>
          <volume>35</volume>
          <issue>6</issue>
          <fpage>1668</fpage>
          <lpage>1676</lpage>
          <pub-id pub-id-type="doi">10.1148/rg.2015150023</pub-id>
          <pub-id pub-id-type="medline">26466178</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kundel</surname>
              <given-names>HL</given-names>
            </name>
          </person-group>
          <article-title>Perception errors in chest radiography</article-title>
          <source>Seminars in Respiratory Medicine</source>
          <year>1989</year>
          <volume>10</volume>
          <issue>03</issue>
          <fpage>203</fpage>
          <lpage>210</lpage>
          <pub-id pub-id-type="doi">10.1055/s-2007-1006173</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Goodman</surname>
              <given-names>LR</given-names>
            </name>
          </person-group>
          <article-title>Felson's Principles of Chest Roentgenology: A Programmed Text. 2nd ed</article-title>
          <source>Radiology</source>
          <year>2000</year>
          <month>3</month>
          <volume>214</volume>
          <issue>3</issue>
          <fpage>848</fpage>
          <pub-id pub-id-type="doi">10.1148/radiology.214.3.r00fe55848</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tanoubi</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Tourangeau</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Sodoké</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Perron</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Drolet</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Bélanger</surname>
              <given-names>ME</given-names>
            </name>
            <name name-style="western">
              <surname>Morris</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ranger</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Paradis</surname>
              <given-names>MR</given-names>
            </name>
            <name name-style="western">
              <surname>Robitaille</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Georgescu</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Comparing the visual perception according to the performance using the eye-tracking technology in high-fidelity simulation settings</article-title>
          <source>Behav Sci (Basel)</source>
          <year>2021</year>
          <volume>11</volume>
          <issue>3</issue>
          <fpage>31</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=bs11030031"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/bs11030031</pub-id>
          <pub-id pub-id-type="medline">33807673</pub-id>
          <pub-id pub-id-type="pii">bs11030031</pub-id>
          <pub-id pub-id-type="pmcid">PMC7998119</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>CC</given-names>
            </name>
            <name name-style="western">
              <surname>Wolfe</surname>
              <given-names>JM</given-names>
            </name>
          </person-group>
          <article-title>Eye movements in medical image perception: a selective review of past, present and future</article-title>
          <source>Vision (Basel)</source>
          <year>2019</year>
          <volume>3</volume>
          <issue>2</issue>
          <fpage>32</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=vision3020032"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/vision3020032</pub-id>
          <pub-id pub-id-type="medline">31735833</pub-id>
          <pub-id pub-id-type="pii">vision3020032</pub-id>
          <pub-id pub-id-type="pmcid">PMC6802791</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kelly</surname>
              <given-names>BS</given-names>
            </name>
            <name name-style="western">
              <surname>Rainford</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>Darcy</surname>
              <given-names>SP</given-names>
            </name>
            <name name-style="western">
              <surname>Kavanagh</surname>
              <given-names>EC</given-names>
            </name>
            <name name-style="western">
              <surname>Toomey</surname>
              <given-names>RJ</given-names>
            </name>
          </person-group>
          <article-title>The development of expertise in radiology: in chest radiograph interpretation, "Expert" search pattern may predate "Expert" levels of diagnostic accuracy for pneumothorax identification</article-title>
          <source>Radiology</source>
          <year>2016</year>
          <volume>280</volume>
          <issue>1</issue>
          <fpage>252</fpage>
          <lpage>260</lpage>
          <pub-id pub-id-type="doi">10.1148/radiol.2016150409</pub-id>
          <pub-id pub-id-type="medline">27322975</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wolfe</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Lyu</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Dong</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>CC</given-names>
            </name>
          </person-group>
          <article-title>What eye tracking can tell us about how radiologists use automated breast ultrasound</article-title>
          <source>J Med Imaging (Bellingham)</source>
          <year>2022</year>
          <volume>9</volume>
          <issue>4</issue>
          <fpage>045502</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35911209"/>
          </comment>
          <pub-id pub-id-type="doi">10.1117/1.JMI.9.4.045502</pub-id>
          <pub-id pub-id-type="medline">35911209</pub-id>
          <pub-id pub-id-type="pii">22068GR</pub-id>
          <pub-id pub-id-type="pmcid">PMC9315059</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>van der Gijp</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ravesloot</surname>
              <given-names>CJ</given-names>
            </name>
            <name name-style="western">
              <surname>Jarodzka</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>van der Schaaf</surname>
              <given-names>MF</given-names>
            </name>
            <name name-style="western">
              <surname>van der Schaaf</surname>
              <given-names>IC</given-names>
            </name>
            <name name-style="western">
              <surname>van Schaik</surname>
              <given-names>JPJ</given-names>
            </name>
            <name name-style="western">
              <surname>Ten Cate</surname>
              <given-names>TJ</given-names>
            </name>
          </person-group>
          <article-title>How visual search relates to visual diagnostic performance: a narrative systematic review of eye-tracking research in radiology</article-title>
          <source>Adv Health Sci Educ Theory Pract</source>
          <year>2017</year>
          <volume>22</volume>
          <issue>3</issue>
          <fpage>765</fpage>
          <lpage>787</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/27436353"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s10459-016-9698-1</pub-id>
          <pub-id pub-id-type="medline">27436353</pub-id>
          <pub-id pub-id-type="pii">10.1007/s10459-016-9698-1</pub-id>
          <pub-id pub-id-type="pmcid">PMC5498587</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Drew</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Evans</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Võ</surname>
              <given-names>MLH</given-names>
            </name>
            <name name-style="western">
              <surname>Jacobson</surname>
              <given-names>FL</given-names>
            </name>
            <name name-style="western">
              <surname>Wolfe</surname>
              <given-names>JM</given-names>
            </name>
          </person-group>
          <article-title>Informatics in radiology: what can you see in a single glance and how might this guide visual search in medical images?</article-title>
          <source>Radiographics</source>
          <year>2013</year>
          <volume>33</volume>
          <issue>1</issue>
          <fpage>263</fpage>
          <lpage>274</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/23104971"/>
          </comment>
          <pub-id pub-id-type="doi">10.1148/rg.331125023</pub-id>
          <pub-id pub-id-type="medline">23104971</pub-id>
          <pub-id pub-id-type="pii">rg.331125023</pub-id>
          <pub-id pub-id-type="pmcid">PMC3545617</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kundel</surname>
              <given-names>HL</given-names>
            </name>
            <name name-style="western">
              <surname>Nodine</surname>
              <given-names>CF</given-names>
            </name>
          </person-group>
          <article-title>Interpreting chest radiographs without visual search</article-title>
          <source>Radiology</source>
          <year>1975</year>
          <volume>116</volume>
          <issue>3</issue>
          <fpage>527</fpage>
          <lpage>532</lpage>
          <pub-id pub-id-type="doi">10.1148/116.3.527</pub-id>
          <pub-id pub-id-type="medline">125436</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Swensson</surname>
              <given-names>RG</given-names>
            </name>
          </person-group>
          <article-title>A two-stage detection model applied to skilled visual search by radiologists</article-title>
          <source>Perception &#38; Psychophysics</source>
          <year>1980</year>
          <volume>27</volume>
          <issue>1</issue>
          <fpage>11</fpage>
          <lpage>16</lpage>
          <pub-id pub-id-type="doi">10.3758/bf03199899</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>JZ</given-names>
            </name>
            <name name-style="western">
              <surname>Mountstephens</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Teo</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Eye-tracking feature extraction for biometric machine learning</article-title>
          <source>Front Neurorobot</source>
          <year>2022</year>
          <volume>15</volume>
          <fpage>796895</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35177973"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fnbot.2021.796895</pub-id>
          <pub-id pub-id-type="medline">35177973</pub-id>
          <pub-id pub-id-type="pmcid">PMC8843826</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shamyuktha</surname>
              <given-names>RS</given-names>
            </name>
            <name name-style="western">
              <surname>Amudha</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Aiswariya Milan</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>A machine learning framework for classification of expert and non-experts radiologists using eye gaze data</article-title>
          <year>2022</year>
          <conf-name>2022 IEEE 7th International Conference on Recent Advances and Innovations in Engineering (ICRAIE)</conf-name>
          <conf-date>December 01-03, 2022</conf-date>
          <conf-loc>Mangalore, India</conf-loc>
          <publisher-name>IEEE</publisher-name>
          <pub-id pub-id-type="doi">10.1109/icraie56454.2022.10054277</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Harezlak</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Kasprowska</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Kasprowska</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Eye movement traits in differentiating experts and laymen</article-title>
          <year>2017</year>
          <conf-name>Man-Machine Interactions 5: 5th International Conference on Man-Machine Interactions, ICMMI 2017</conf-name>
          <conf-date>October 3-6, 2017</conf-date>
          <conf-loc>Kraków, Poland</conf-loc>
          <pub-id pub-id-type="doi">10.1007/978-3-319-67792-7_9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Akshay</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Megha</surname>
              <given-names>YJ</given-names>
            </name>
            <name name-style="western">
              <surname>Shetty</surname>
              <given-names>CB</given-names>
            </name>
          </person-group>
          <article-title>Machine learning algorithm to identify eye movement metrics using raw eye tracking data</article-title>
          <year>2020</year>
          <conf-name>2020 Third International Conference on Smart Systems and Inventive Technology (ICSSIT)</conf-name>
          <conf-date>August 20-22, 2020</conf-date>
          <conf-loc>Tirunelveli, India</conf-loc>
          <publisher-name>IEEE</publisher-name>
          <fpage>949</fpage>
          <lpage>955</lpage>
          <pub-id pub-id-type="doi">10.1109/icssit48917.2020.9214290</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rizzo</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Ermini</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Zanca</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Bernabini</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Rossi</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>A machine learning approach for detecting cognitive interference based on eye-tracking data</article-title>
          <source>Front Hum Neurosci</source>
          <year>2022</year>
          <volume>16</volume>
          <fpage>806330</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/35572006"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fnhum.2022.806330</pub-id>
          <pub-id pub-id-type="medline">35572006</pub-id>
          <pub-id pub-id-type="pmcid">PMC9101480</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Öder</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Eraslan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Yesilada</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Automatically classifying familiar web users from eye-tracking data: a machine learning approach</article-title>
          <source>Turkish Journal of Electrical Engineering and Computer Sciences</source>
          <year>2022</year>
          <volume>30</volume>
          <issue>1</issue>
          <fpage>233</fpage>
          <lpage>248</lpage>
          <pub-id pub-id-type="doi">10.3906/elk-2103-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Karargyris</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kashyap</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lourentzou</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>JT</given-names>
            </name>
            <name name-style="western">
              <surname>Sharma</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Tong</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Abedin</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Beymer</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Mukherjee</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Krupinski</surname>
              <given-names>EA</given-names>
            </name>
            <name name-style="western">
              <surname>Moradi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Creation and validation of a chest X-ray dataset with eye-tracking and report dictation for AI development</article-title>
          <source>Sci Data</source>
          <year>2021</year>
          <volume>8</volume>
          <issue>1</issue>
          <fpage>92</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41597-021-00863-5"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41597-021-00863-5</pub-id>
          <pub-id pub-id-type="medline">33767191</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41597-021-00863-5</pub-id>
          <pub-id pub-id-type="pmcid">PMC7994908</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bigolin Lanfredi</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Auffermann</surname>
              <given-names>WF</given-names>
            </name>
            <name name-style="western">
              <surname>Chan</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Duong</surname>
              <given-names>PAT</given-names>
            </name>
            <name name-style="western">
              <surname>Srikumar</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Drew</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Schroeder</surname>
              <given-names>JD</given-names>
            </name>
            <name name-style="western">
              <surname>Tasdizen</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>REFLACX, a dataset of reports and eye-tracking data for localization of abnormalities in chest x-rays</article-title>
          <source>Sci Data</source>
          <year>2022</year>
          <volume>9</volume>
          <issue>1</issue>
          <fpage>350</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41597-022-01441-z"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41597-022-01441-z</pub-id>
          <pub-id pub-id-type="medline">35717401</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41597-022-01441-z</pub-id>
          <pub-id pub-id-type="pmcid">PMC9206650</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Robinson</surname>
              <given-names>KA</given-names>
            </name>
          </person-group>
          <article-title>Dictionary of eye terminology</article-title>
          <source>Br J Ophthalmol</source>
          <year>1997</year>
          <volume>81</volume>
          <issue>11</issue>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bjo.bmj.com/content/81/11/1021.4"/>
          </comment>
          <pub-id pub-id-type="doi">10.1136/bjo.81.11.1021c</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Shiraishi</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Katsuragawa</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ikezoe</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Matsumoto</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Kobayashi</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Komatsu</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Matsui</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Fujita</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Kodera</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Doi</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Development of a digital image database for chest radiographs with and without a lung nodule: receiver operating characteristic analysis of radiologists' detection of pulmonary nodules</article-title>
          <source>AJR Am J Roentgenol</source>
          <year>2000</year>
          <volume>174</volume>
          <issue>1</issue>
          <fpage>71</fpage>
          <lpage>74</lpage>
          <pub-id pub-id-type="doi">10.2214/ajr.174.1.1740071</pub-id>
          <pub-id pub-id-type="medline">10628457</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>HQ</given-names>
            </name>
            <name name-style="western">
              <surname>Lam</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Le</surname>
              <given-names>LT</given-names>
            </name>
            <name name-style="western">
              <surname>Pham</surname>
              <given-names>HH</given-names>
            </name>
            <name name-style="western">
              <surname>Tran</surname>
              <given-names>DQ</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>DB</given-names>
            </name>
            <name name-style="western">
              <surname>Le</surname>
              <given-names>DD</given-names>
            </name>
            <name name-style="western">
              <surname>Pham</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Tong</surname>
              <given-names>HTT</given-names>
            </name>
            <name name-style="western">
              <surname>Dinh</surname>
              <given-names>DH</given-names>
            </name>
            <name name-style="western">
              <surname>Do</surname>
              <given-names>CD</given-names>
            </name>
            <name name-style="western">
              <surname>Doan</surname>
              <given-names>LT</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>CN</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>BT</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>QV</given-names>
            </name>
            <name name-style="western">
              <surname>Hoang</surname>
              <given-names>AD</given-names>
            </name>
            <name name-style="western">
              <surname>Phan</surname>
              <given-names>HN</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>AT</given-names>
            </name>
            <name name-style="western">
              <surname>Ho</surname>
              <given-names>PH</given-names>
            </name>
            <name name-style="western">
              <surname>Ngo</surname>
              <given-names>DT</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>NT</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>NT</given-names>
            </name>
            <name name-style="western">
              <surname>Dao</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Vu</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>VinDr-CXR: an open dataset of chest X-rays with radiologist's annotations</article-title>
          <source>Sci Data</source>
          <year>2022</year>
          <volume>9</volume>
          <issue>1</issue>
          <fpage>429</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41597-022-01498-w"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41597-022-01498-w</pub-id>
          <pub-id pub-id-type="medline">35858929</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41597-022-01498-w</pub-id>
          <pub-id pub-id-type="pmcid">PMC9300612</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="web">
          <article-title>EyeLink 1000 plus</article-title>
          <source>SR-Research</source>
          <year>2017</year>
          <access-date>2024-09-18</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.sr-research.com/eyelink-1000-plus/">https://www.sr-research.com/eyelink-1000-plus/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="web">
          <article-title>Tobii eye tracker 5</article-title>
          <source>Tobii</source>
          <year>2020</year>
          <access-date>2024-09-18</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://gaming.tobii.com/product/eye-tracker-5/">https://gaming.tobii.com/product/eye-tracker-5/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Aggarwal</surname>
              <given-names>CC</given-names>
            </name>
            <name name-style="western">
              <surname>Hinneburg</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Keim</surname>
              <given-names>DA</given-names>
            </name>
          </person-group>
          <article-title>On the surprising behavior of distance metrics in high dimensional space</article-title>
          <year>2001</year>
          <conf-name>Database Theory - ICDT 2001</conf-name>
          <conf-date>January 4-6, 2001</conf-date>
          <conf-loc>London, United Kingdom</conf-loc>
          <fpage>8</fpage>
          <pub-id pub-id-type="doi">10.1007/3-540-44503-x_27</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wold</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Esbensen</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Geladi</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Principal component analysis</article-title>
          <source>Chemometrics and Intelligent Laboratory Systems</source>
          <year>1987</year>
          <volume>2</volume>
          <issue>1-3</issue>
          <fpage>37</fpage>
          <lpage>52</lpage>
          <pub-id pub-id-type="doi">10.1016/0169-7439(87)80084-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schölkopf</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Smola</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Müller</surname>
              <given-names>KR</given-names>
            </name>
          </person-group>
          <article-title>Kernel principal component analysis</article-title>
          <year>1997</year>
          <conf-name>Artificial Neural Networks — ICANN ’97: 7th International Conference</conf-name>
          <conf-date>October 8-10, 1997</conf-date>
          <conf-loc>Lausanne, Switzerland</conf-loc>
          <pub-id pub-id-type="doi">10.1007/bfb0020217</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pedregosa</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Varoquaux</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Gramfort</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Michel</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Thirion</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Grisel</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Blondel</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Prettenhofer</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Weiss</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Dubourg</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>VanderPlas</surname>
              <given-names>JT</given-names>
            </name>
            <name name-style="western">
              <surname>Passos</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Cournapeau</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Brucher</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Perrot</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Duchesnay</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Scikit-learn: machine learning in python</article-title>
          <source>The Journal of Machine Learning Research</source>
          <year>2011</year>
          <volume>12</volume>
          <fpage>2825</fpage>
          <lpage>2830</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmlr.org/papers/volume12/pedregosa11a/pedregosa11a.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Guestrin</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>XGBoost: a scalable tree boosting system</article-title>
          <year>2016</year>
          <conf-name>Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining</conf-name>
          <conf-date>August 13-17, 2016</conf-date>
          <conf-loc>San Francisco, CA</conf-loc>
          <pub-id pub-id-type="doi">10.1145/2939672.2939785</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Krizhevsky</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sutskever</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Hinton</surname>
              <given-names>GE</given-names>
            </name>
          </person-group>
          <article-title>ImageNet classification with deep convolutional neural networks</article-title>
          <source>Communications of the ACM</source>
          <year>2017</year>
          <volume>60</volume>
          <issue>6</issue>
          <fpage>84</fpage>
          <lpage>90</lpage>
          <pub-id pub-id-type="doi">10.1145/3065386</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cybenko</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>O'Leary</surname>
              <given-names>DP</given-names>
            </name>
            <name name-style="western">
              <surname>Rissanen</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <source>The Mathematics of Information Coding, Extraction and Distribution</source>
          <year>1998</year>
          <publisher-loc>Germany</publisher-loc>
          <publisher-name>Springer Science &#38; Business Media</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Summer</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Alaeddini</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Analysis of feature extraction methods for prediction of 30-day hospital readmissions</article-title>
          <source>Methods of Information in Medicine</source>
          <year>2019</year>
          <volume>58</volume>
          <issue>6</issue>
          <fpage>213</fpage>
          <lpage>221</lpage>
          <pub-id pub-id-type="doi">10.1055/s-0040-1702159</pub-id>
          <pub-id pub-id-type="medline">32349155</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="web">
          <article-title>CXR8</article-title>
          <source>NIH Clinical Center Box</source>
          <access-date>2024-12-02</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://nihcc.app.box.com/v/ChestXray-NIHCC">https://nihcc.app.box.com/v/ChestXray-NIHCC</ext-link>
          </comment>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
