<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Form Res</journal-id><journal-id journal-id-type="publisher-id">formative</journal-id><journal-id journal-id-type="index">27</journal-id><journal-title>JMIR Formative Research</journal-title><abbrev-journal-title>JMIR Form Res</abbrev-journal-title><issn pub-type="epub">2561-326X</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v9i1e70331</article-id><article-id pub-id-type="doi">10.2196/70331</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Performance of a Retinal Imaging Camera With On-Device Intelligence for Primary Care: Retrospective Study</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes" equal-contrib="yes"><name name-style="western"><surname>Silvestrini</surname><given-names>Matthew</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1"/><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Lui</surname><given-names>Clarissa</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1"/><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Patwardhan</surname><given-names>Anil</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name 
name-style="western"><surname>Chen</surname><given-names>Ying</given-names></name><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name name-style="western"><surname>Ali</surname><given-names>Tayyeba</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name name-style="western"><surname>Glik</surname><given-names>Elie</given-names></name><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name name-style="western"><surname>Wu</surname><given-names>Honglei</given-names></name><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name name-style="western"><surname>Levinstein</surname><given-names>Brian</given-names></name><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name name-style="western"><surname>Wenz</surname><given-names>Adrianna</given-names></name><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name name-style="western"><surname>Shemonski</surname><given-names>Nathan</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name name-style="western"><surname>Yang</surname><given-names>Lin</given-names></name><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name name-style="western"><surname>Atkinson</surname><given-names>Ian</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><name name-style="western"><surname>Kavusi</surname><given-names>Sam</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1"/></contrib></contrib-group><aff id="aff1"><institution>Verily Life Sciences</institution><addr-line>999 Bayhill Dr</addr-line><addr-line>San Bruno</addr-line><addr-line>CA</addr-line><country>United States</country></aff><contrib-group><contrib contrib-type="editor"><name 
name-style="western"><surname>Sarvestan</surname><given-names>Javad</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Malerbi</surname><given-names>Fernando Korn</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Shen</surname><given-names>Mengxi</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Villabona-Martinez</surname><given-names>Valeria</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Matthew Silvestrini, PhD, Verily Life Sciences, 999 Bayhill Dr, San Bruno, CA, 94066, United States, 1 415-786-7939; <email>silve@verily.com</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>these authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>17</day><month>7</month><year>2025</year></pub-date><volume>9</volume><elocation-id>e70331</elocation-id><history><date date-type="received"><day>23</day><month>12</month><year>2024</year></date><date date-type="rev-recd"><day>01</day><month>05</month><year>2025</year></date><date date-type="accepted"><day>02</day><month>05</month><year>2025</year></date></history><copyright-statement>&#x00A9; Matthew Silvestrini, Clarissa Lui, Anil Patwardhan, Ying Chen, Tayyeba Ali, Elie Glik, Honglei Wu, Brian Levinstein, Adrianna Wenz, Nathan Shemonski, Lin Yang, Ian Atkinson, Sam Kavusi. Originally published in JMIR Formative Research (<ext-link ext-link-type="uri" xlink:href="https://formative.jmir.org">https://formative.jmir.org</ext-link>), 17.7.2025. 
</copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Formative Research, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://formative.jmir.org">https://formative.jmir.org</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://formative.jmir.org/2025/1/e70331"/><abstract><sec><title>Background</title><p>Access to screening continues to be a barrier for the early detection of diabetic retinopathy (DR). Primary care&#x2013;based diabetic retinopathy screening could improve access, but operational challenges, such as cost and workflow management, hamper the widespread adoption of retinal camera systems in primary care clinics in the United States.</p></sec><sec><title>Objective</title><p>This study aimed to develop and evaluate a retinal screening system suitable for integration into a primary care workflow.</p></sec><sec sec-type="methods"><title>Methods</title><p>We developed a nonmydriatic, 45&#x00B0; field imaging retinal camera system, the Verily Numetric Retinal Camera (VNRC; Verily Life Sciences LLC), able to generate high-fidelity retinal images enabled by on-device intelligent features. The VNRC output flows into cloud-based software that accepts and routes digitized images for grading. We evaluated the performance and usability of the VNRC in 2 studies. 
A retrospective performance study compared the performance of VNRC against a reference camera (Crystalvue NFC-700 [Crystalvue Medical]) as well as the correlation between VNRC capture status and gradability (as determined by ophthalmologist graders). The usability study simulated a primary care setting for a combined cohort of trained and untrained users (corresponding to patients in the simulation) and operators (corresponding to health care personnel in the simulation), where respondents completed a questionnaire about their user experience after attempting to capture images with the VNRC.</p></sec><sec sec-type="results"><title>Results</title><p>In the comparative performance study (N=108, K=206 images), a total of 98.5% (203/206) of images captured by the VNRC were graded as sufficient for clinical interpretation compared to 97.1% (200/206) of Crystalvue NFC-700 images (difference in proportion was 0.015, 95% CI &#x2013;0.007 to 0.033). In the quality control algorithm evaluation (N=172, K=343 images), we found a positive association (<italic>&#x03C6;</italic>=0.58, 95% CI 0.45&#x2010;0.69) between gradability status (gradable or nongradable) as determined by ophthalmologists and the capture status (recapture not-needed or needed) as determined by the VNRC quality control algorithm. In the usability study (n=15 users and n=30 operators), all participating users (15/15) indicated that they were able to have both eyes screened easily. 
Most users and operators indicated agreement (from somewhat agree to strongly agree) with statements describing the imaging process as intuitive (15/15, 100% and 29/30, 97%), comfortable (15/15, 100% and 30/30, 100%), and allowing for a positive experience (15/15, 100% and 30/30, 100%) for users and operators, respectively.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Our findings about the performance and usability of this retinal camera system support its deployment as an integrated end-to-end retinal service for primary care. These results warrant additional studies to fully characterize real-world usability across a wider and diverse set of primary care clinics.</p></sec></abstract><kwd-group><kwd>diabetic retinopathy screening</kwd><kwd>retinal camera</kwd><kwd>retinal imaging</kwd><kwd>fundus imaging</kwd><kwd>diabetic retinopathy</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Diabetes mellitus (DM) is a growing epidemic that impacts more than 38 million Americans [<xref ref-type="bibr" rid="ref1">1</xref>], and accounts for a substantial burden to the health care system (approximately US $412 billion) in both direct costs and lost productivity [<xref ref-type="bibr" rid="ref2">2</xref>]. One of the most common complications associated with DM is diabetic retinopathy (DR), a progressive condition that impacts the microvasculature of the eye and results in irreversible damage to the retina [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref4">4</xref>]. DR, which occurs in approximately 25% of all patients with DM [<xref ref-type="bibr" rid="ref5">5</xref>], is the leading cause of blindness in the United States and represents a major quality of life burden. 
Even though studies have found that early detection of DR can mitigate vision loss by over 90% [<xref ref-type="bibr" rid="ref6">6</xref>], diligent screening of patients with diabetes for DR in the United States remains encumbered by the need for a referral into an ophthalmology clinic. The logistical and financial barriers associated with specialty care are a major cause of noncompliance with the American Diabetes Association screening guidelines [<xref ref-type="bibr" rid="ref7">7</xref>-<xref ref-type="bibr" rid="ref9">9</xref>]. These barriers disproportionately impact rural and resource-poor areas, where limitations in the availability of specialists and ophthalmologic equipment may be associated with screening rates lower than 20% [<xref ref-type="bibr" rid="ref10">10</xref>].</p><p>Implementation of DR screening in primary care clinics could improve access [<xref ref-type="bibr" rid="ref11">11</xref>], and there are retinal camera systems suitable for primary care deployment, such as tabletop Crystalvue (Crystalvue Medical) or Topcon (Topcon Healthcare, Inc) models, and handheld Retinavue (Hillrom), Phelcom (Phelcom Technologies), Remidio (Remidio Innovative Solutions Pvt Ltd), or Volk (Volk Optical) models. Efficient DR screening in primary care, however, faces hurdles. Common issues that impact the consistency of image quality, such as handling small pupil sizes, cataracts, lid and lash occlusions, eye movements, or user positioning, may represent technical challenges for primary care settings. Mydriasis can be another technical challenge; while nonmydriatic imaging is operationally more approachable than mydriatic imaging, it yields some degradation in image quality [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref13">13</xref>]. In addition, the adoption of retinal camera systems in primary care clinics requires resource investments and efficient integration into the clinical workflow. 
It is therefore unsurprising that DR screening programs tend to succeed in large and well-resourced health organizations that can adopt mydriatic-based systems [<xref ref-type="bibr" rid="ref14">14</xref>-<xref ref-type="bibr" rid="ref16">16</xref>], while resource-constrained or small primary-care clinics face difficulties acquiring and operating DR programs [<xref ref-type="bibr" rid="ref17">17</xref>]. Apart from the direct equipment purchasing costs, training and maintaining dedicated staff (in settings prone to high staff turnover) may become problematic [<xref ref-type="bibr" rid="ref18">18</xref>]. Furthermore, these environments may often lack optimal infrastructures for electronic health record integration, information technology and Picture Archiving and Communication Systems, and tele-retinal grading services.</p><p>These issues are top concerns when tailoring DR screening systems toward primary care. Desirable features in this regard would include relatively affordable cost, as well as the feasibility of integration into existing workflows and of use by nondedicated staff. Yet, it remains imperative to achieve these attributes while maintaining diagnostic accuracy and integrity. DR screening systems need to produce images of sufficient quality to enable ophthalmologists to assess the presence of retinal disease appropriately. Retinal image quality is therefore a necessary condition that correlates with downstream gradability and clinical interpretability.</p><p>We report the development and performance characterization of a retinal screening camera system for use in a primary care practice. This system, developed as &#x201C;Verily Retinal Camera&#x201D; during the initial investigational period, is now termed Verily Numetric Retinal Camera (VNRC; Verily Life Sciences LLC), and provides nonmydriatic, color fundus photos of the eye. 
Our objectives were threefold: (1) to evaluate the comparative performance of the VNRC against a standard state-of-the-art reference device, the Crystalvue NFC-700 (Crystalvue Medical), for providing high-quality images sufficient for clinical interpretation in a retina image analysis study; (2) to characterize the utility of the VNRC quality control algorithm, investigating the association between the algorithm outputs (scoring the image quality) and human graders&#x2019; assessments of whether images were gradable or ungradable; and (3) to assess the usability of the VNRC among potential operators and users in a simulated primary care setting.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Devices</title><p>The VNRC and the Crystalvue NFC-700 were used in this study. The VNRC is a nonmydriatic, macula-centered, 45&#x00B0; field imaging retinal camera system that uses a set of proprietary machine learning algorithms to generate high-fidelity composite color posterior chamber images of the eye by capturing and compiling a series of retinal images from each perceived single flash (&#x003C;200 ms). The camera system is incorporated into a cloud-based software platform that accepts digitized images transferred from the VNRC and has the capacity to store, convert formats, display, and transfer data or imaging data between cameras (Figure S1 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>).</p><p>The Crystalvue NFC-700 camera [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>] uses a conventional technique [<xref ref-type="bibr" rid="ref21">21</xref>] to automatically adjust focus and capture the best quality image using a single annulus type of illumination. 
The image is captured by the built-in color complementary metal-oxide-semiconductor camera module.</p></sec><sec id="s2-2"><title>Retrospective Retina Image Analysis Study</title><p>This was an analysis of previously collected paired-image acquisitions (discussed further in this study). The objective of this study was to characterize the overall performance of the VNRC, articulated in 2 aims, that is, first, characterize the comparative performance of the VNRC (as an investigational device) against a reference instrument to generate clinically interpretable images, and second, evaluate the utility of the image quality control feature (IQCF) embedded in the camera system (discussed in section &#x201C;Development and Description of a Retinal Imaging Camera for Primary Care&#x201D;), by investigating the association between the outputs of this algorithm scoring the quality (ie, the capture status) of the images and image gradeability as determined by ophthalmologists.</p><sec id="s2-2-1"><title>Study Participants and Image Eligibility</title><p>The original trial enrolled 212 participants of at least 22 years of age and who provided informed consent to the protocol; it was conducted in 4 separate phases at 2 sites (Verily Life Sciences South San Francisco headquarters and Diablo Clinical Research). This retrospective study was based on a trial subcohort consisting of non-Verily participants with self-reported history of diabetes (N=172; Figure S2 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>); this exclusion was to remove sponsor employees as participants in the evaluation of the device.</p><p>Analyses were conducted on retinal fundus images collected without mydriasis. 
For the comparative performance analysis (first aim), eligible images were those from participants who achieved successful image acquisitions without the aid of mydriasis by both cameras (investigational and reference) from both eyes, and for which annotations about clinical interpretability from 3 ophthalmologists were available (n=108 participants). These excluded images captured with only one of the cameras, and also those lacking proper annotations.</p><p>For the IQCF utility analysis (second aim), eligible images were those from participants with at least 1 evaluable eye image obtained with the VNRC with a corresponding output from the reference human graders, namely, an annotation as &#x201C;ungradable&#x201D; or &#x201C;gradable&#x201D; after human inspection (n=172 participants). This excluded those images without proper grader annotation.</p></sec><sec id="s2-2-2"><title>Study Procedures</title><p>Nonmydriatic retinal fundus images were acquired on 2 separate cameras from both right and left eyes of each participant, during a single session. Operators took up to 3 images per eye at their discretion, first on the Crystalvue NFC-700 followed by the VNRC.</p><p>Graders received images according to an established protocol. For the Crystalvue NFC-700 camera, graders received the last image acquired for each eye, and for the VNRC, graders received either the first image that passed the quality control feature for each eye, or the last image acquired per eye if no image had passed the quality control feature. This sequencing approach aimed to achieve parity using the highest quality image captured from each camera.</p><p>Images from both cameras were presented in random order to 3 board-certified ophthalmologist graders one by one using a grading platform (Figure S3 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). 
Graders were external (non&#x2013;sponsor-affiliated) professionals who assessed gradability and clinical significance as well as image quality characteristics according to a predefined rubric. The final outcomes were based on the majority vote across 3 graders.</p></sec><sec id="s2-2-3"><title>Statistical Analyses</title><sec id="s2-2-3-1"><title>Aim 1</title><p>We compared the performance of VNRC to the reference camera (Crystalvue NFC-700) based on the proportion of acquired images that were determined to be &#x201C;sufficient quality for clinical interpretation.&#x201D; This derived binary outcome was based on the majority vote of 3 board-certified ophthalmologists who each reviewed and annotated the same image as &#x201C;Yes&#x201D; or &#x201C;No.&#x201D; Each participant could contribute up to 1 image from each eye for annotation and subsequent end point evaluation. Duplicate acquisitions of the same eye were not included in any analysis.</p><p>The estimated parameter for the end point was the difference in the proportion of images considered sufficient quality for clinical interpretation (ie, Yes), between those from the VNRC camera and those from the Crystalvue NFC-700 camera. Point estimates and 95% CIs for the reported proportions and difference in proportions were based on bootstrap (ie, &#x201C;cluster&#x201D; or &#x201C;block&#x201D; bootstrap) that accounted for the paired-image design (ie, correlated proportions) and within-participant clustering, as participants provided images from both the right and left eye. This cluster bootstrap was applied as follows:</p><p>Define: J=cluster unit = participant, where there may be multiple observation units (ie, eyes) within a participant. The sampling is based on the total number of J clusters.</p><p>The first step is to randomly select J number of clusters with replacement. 
For each cluster selected (with some clusters selected more than once and others not selected at all), all observations (ie, eyes) within that cluster are selected. Original cluster sizes are maintained.</p><p>In addition, due to the paired design, whenever an eye image is selected based on the output from 1 camera, the corresponding eye image from the comparative camera is also selected.</p><p>A difference in proportion is calculated based on the bootstrapped sample, and the process is repeated B number of times. Our analysis used B=10,000.</p><p>The point estimate for the difference in proportion is based on the 50th percentile of the resulting bootstrap distribution. Nonparametric 95% CIs for the difference in proportion were derived based on the 2.5% and 97.5% quantiles of the resulting bootstrap distribution.</p></sec><sec id="s2-2-3-2"><title>Aim 2</title><p>We investigated the utility of the IQCF based on the association of the scores generated by this algorithm with the classification as &#x201C;ungradable&#x201D; or &#x201C;gradable&#x201D; by human graders. The algorithm scores image quality, generating an output of &#x201C;recapture not needed&#x201D; (eg, sufficient quality) or &#x201C;recapture needed&#x201D; (eg, insufficient quality) based on a score threshold. We reported the results from this portion of the study using summaries appropriate for categorical or ordinal data (counts and percentages). 
The correlation of gradability status with outputs from the IQCF algorithm was summarized using both a contingency table and the Phi-coefficient.</p></sec></sec></sec><sec id="s2-3"><title>Usability Study</title><sec id="s2-3-1"><title>Study Participants</title><p>This study was conducted across two groups (N=45) in a simulated primary care setting: (1) an operator cohort consisting of individuals with health care degrees or licenses (registered nurse, nurse practitioner, Licensed Practical Nurse, and physician assistant; n=15), or some health care training (master&#x2019;s degree, pharmacy technician, and phlebotomist; n=15), and (2) a user cohort consisting of participants with diabetes without health care training, who were asked to complete the VNRC retinal screening workflow unaided by a technician (n=15). This study was determined to be exempt research that did not require IRB approval.</p></sec><sec id="s2-3-2"><title>Study Procedures</title><p>Operators (group 1) were trained in 2 subgroups: individuals with health care degrees or licenses, and those with some health care training. Training consisted of a visual or auditory slide presentation, demonstration, hands-on activities, and time for the sponsor to answer questions. Approximately half of each subgroup received in-person training, while the other half received one-on-one training from a remote sponsor representative via videoconference. All operators had access to the camera system during training sessions. Training lasted approximately 1 hour. Trained participants experienced a decay period of at least 1 hour and up to 7 hours between their training and test sessions.</p><p>The simulated environment (ie, clinic) was equipped with an adjustable height table and all accessories required to use the retinal camera. 
Participants were asked to perform tasks within representative, naturalistic use scenarios; participants in the operator cohort interacted with pretrained actors (as stand-ins for hypothetical patients) who behaved consistently across simulations to elicit specific responses from operator participants. Moderators used a series of questionnaires around camera ease of use to collect participant feedback.</p></sec><sec id="s2-3-3"><title>Analysis</title><p>Operator and user questionnaires followed Likert scales (from 1=strongly disagree to 7=strongly agree). We analyzed responses using descriptive statistics.</p></sec></sec><sec id="s2-4"><title>Development and Description of a Retinal Imaging Camera for Primary Care</title><p>The Verily Retinal Service was developed for use in primary care clinics. The VNRC is a lightweight (approximately 6 kg), 45&#x00B0; field imaging camera system consisting of custom electronics, optics, LEDs, and a retinal camera (<xref ref-type="fig" rid="figure1">Figures 1A and 1B</xref>). The VNRC has a range of pivot=0&#x00B0; to 45&#x00B0; to adjust for user height, posture, and comfort. The black face rest is light blocking, which enables retinal imaging in bright ambient lighting conditions, and the built-in handle allows for flexibility for camera placement within a clinic space.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Overview of the Verily Numetric Retinal Camera. (<bold>A</bold>) Illustration of the Verily Numetric Retinal Camera and the (<bold>B</bold>) architecture. (<bold>C</bold>) Operator&#x2019;s and user&#x2019;s view during image acquisition. 
Cam: camera.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v9i1e70331_fig01.png"/></fig><p>The VNRC uses a stereo infrared camera system (pupil camera 1 and 2 in <xref ref-type="fig" rid="figure1">Figure 1B</xref>) to achieve proper pupil alignment and facilitate fast image acquisition (120 frames per second [fps]). Users can adjust an image to their best perceived focus with a focus knob, and an interactive game-like interface allows users to optimize proper pupil alignment using small eye or head movements within the face rest, while operators can simultaneously oversee this process to ensure proper eye location (<xref ref-type="fig" rid="figure1">Figure 1C</xref>). The focus motor controls both the retinal camera and microdisplay, allowing the user to view an interactive image and video inside the camera. This is similar to an experience in a virtual reality headset, but monocular.</p><p>After the initial focus step, users position their head into the VNRC face rest in order to align their eye with the camera lens. Once proper focal length is established by dialing the focus knob, users can confirm by pressing the top of the focus knob. Alternatively, for users unable to use the focus knob, the VNRC can also be brought into focus using an operator-driven setting controlled via the software graphical user interface.</p><p>The VNRC then uses a proprietary redundant illumination system to automatically collect a series of retinal images (up to 30). This is in contrast to conventional techniques that only capture 1 image frame per flash [<xref ref-type="bibr" rid="ref21">21</xref>] and is made possible by the use of high-speed high-sensitivity image sensors (120 fps) deeply integrated with the system-on-chip. The flash duration is limited to ensure sufficient quality, minimizing dazzle. 
Here, the system-on-chip uses an LED array with the camera system to capture a series of full-field (&#x003E;45&#x00B0;) images of the retina with various illuminations focused on different areas of the crystalline lens of the eye (<xref ref-type="fig" rid="figure2">Figure 2</xref>). The proprietary redundant illumination system dynamically adapts to specific eye characteristics, such as pupil size and position, and optimizes a series of retinal illumination configurations, regardless of corneal clarity.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Verily Numetric Retinal Camera&#x2019;s pupil tracking adaptive illumination. (<bold>A</bold>) Diagram of the eye indicating where the retinal camera projects the illumination dot location onto the pupil area; (<bold>B</bold>) comparison between conventional flash and exposure cameras, where only 1 image is captured and has a higher likelihood of artifacts, and the Verily Numetric Retinal Camera method of oversampling, where a series of images are captured and merged into a final image and hence mitigating the artifacts from any one single image.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v9i1e70331_fig02.png"/></fig><p>This sequence of flashes, or &#x201C;burst imaging,&#x201D; illuminates the entire field of view, occurs within 200 milliseconds, and is perceived as a single flash. The acquisition time falls below the typical latency of the pupil&#x2019;s response time [<xref ref-type="bibr" rid="ref22">22</xref>], so that the imaging process is not expected to interfere with natural pupil constriction. 
The typical burst image set contains many fully illuminated images, with varying levels of artifacts on the images.</p><p>The VNRC uses a proprietary &#x201C;Burst Reduce Algorithm&#x201D; to generate a single high-fidelity retinal image (<xref ref-type="fig" rid="figure3">Figures 3A and 3B</xref>), by merging a variable number of frames. The total number of frames is typically 20 images, but is highly dependent on the presence of artifacts and the amount of eye motion during the camera flash. The algorithm draws similarities from pixel-level high dynamic range imaging [<xref ref-type="bibr" rid="ref23">23</xref>] but incorporates an assignment of a score layer to separate photons due to retinal reflection from scattered photons due to cataract or cornea reflection, or lid or lash occlusion, after compensating for intra-acquisition gaze shift. It is notable that no fine spatial filtering was applied to enhance the contrast or segment-specific features or pathologies [<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref25">25</xref>].</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Creation of a single composite image with Verily Numetric Retinal Camera. (<bold>A</bold>) Burst imaging captures up to 30 images from a single flash via oversampling (&#x003C;200 ms). (<bold>B</bold>) Using a burst reduction algorithm, the Verily Numetric Retinal Camera creates a single high-fidelity composite image. (<bold>C</bold>) A quality control algorithm (the image quality control feature) is then used to predict the signal quality from the retina, and a score is assigned. A passing image quality control feature score indicates that an image is of sufficient quality and does not require recapture. 
IQCF: image quality control feature; QC: quality control.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v9i1e70331_fig03.png"/></fig><p>A subsequent algorithm, the IQCF algorithm, then performs a quality control assessment of the resulting composite image to determine if a recapture is needed (<xref ref-type="fig" rid="figure3">Figure 3C</xref>). The model was trained to compute areas where the retina is clearly visible, without impaired visibility due to (1) darkness, (2) saturation, (3) blur, or (4) haze. The IQCF algorithm produces a single score per image by calculating the probability that each pixel contributes to that of the retina signal, as represented by an image mask. The algorithm incorporates a fixed cutoff to determine if the image needs to be recaptured. If the score does not exceed this operating point, then the camera will output instructions to the operator that another image acquisition is required. Thus, the VNRC provides real-time image quality characterization information to help reduce the chances that a substandard image is used for clinical interpretation. Importantly, the IQCF does not score image gradeability.</p><p>The final output of the VNRC can integrate into a primary care workflow. It feeds into a cloud-based software platform that accepts digitized images transferred from the camera and has the capacity to transfer, store, convert formats, display, and transfer medical device data or medical imaging data between medical devices (Figure S1 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). The VNRC also continuously collects metadata such as details on camera use, uptime, and operation in order to allow for real-time error handling. 
This passively collected metadata is uploaded along with the retinal images.</p></sec><sec id="s2-5"><title>Ethical Considerations</title><p>This was a retrospective study conducted in a subset of the participants in a prospective technical feasibility trial (Verily protocol 103535; approved by institutional review board [IRB] Western IRB, before initiation, IRB Protocol #20214693) [<xref ref-type="bibr" rid="ref26">26</xref>]. All participants signed informed consent (<xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>) approved by the IRB and received nominal compensation for their time ($25 for the screening procedure, an additional $75 for each completed study visit). Personal study-related data were managed in accordance with local data protection law.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Comparative Performance of the VNRC Against the Crystalvue NFC-700 Camera</title><p>Eye images (K=206) were captured from 108 participants (<xref ref-type="table" rid="table1">Table 1</xref>) with both the VNRC and the Crystalvue NFC-700 Camera.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Demographic characteristics of participants in the comparative performance analysis within the retrospective retina image study (as reference, refer to the characteristics of all participants in the original technical feasibility trial in the 2 right columns).</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Characteristics</td><td align="left" valign="bottom">Retrospective study comparative performance (N=108)</td><td align="left" valign="bottom" colspan="2">Initial technical feasibility trial (N=212)</td></tr><tr><td align="left" valign="bottom"/><td align="left" valign="bottom"/><td align="left" valign="bottom">Phase 1&#x2010;3 (n=192)</td><td align="left" valign="bottom">Phase 4 (n=20)</td></tr></thead><tbody><tr><td 
align="left" valign="top">Age (y)</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Mean (SD)</td><td align="left" valign="top">57.8 (13.2)</td><td align="left" valign="top">54.8 (16.6)</td><td align="left" valign="top">49.0 (10.3)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Median (range)</td><td align="left" valign="top">59.5 (23.0&#x2010;84.0)</td><td align="left" valign="top">&#x2003;&#x2014;<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top">&#x2003;&#x2014;</td></tr><tr><td align="left" valign="top">Sex, n (%)</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Female</td><td align="left" valign="top">60 (55.6)</td><td align="left" valign="top">94 (49)</td><td align="left" valign="top">11 (55)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Male</td><td align="left" valign="top">48 (44.4)</td><td align="left" valign="top">98 (51)</td><td align="left" valign="top">9 (45)</td></tr><tr><td align="left" valign="top">Race, n (%)</td><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Asian</td><td align="left" valign="top">9 (8.3)</td><td align="left" valign="top">44 (22.9)</td><td align="left" valign="top">&#x003C;5 (&#x2264;5)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Black or 
African American</td><td align="left" valign="top">18 (16.7)</td><td align="left" valign="top">18 (9.4)</td><td align="left" valign="top">&#x003C;5 (&#x2264;5)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>White</td><td align="left" valign="top">76 (70.4)</td><td align="left" valign="top">115 (59.9)</td><td align="left" valign="top">18 (90)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Other</td><td align="left" valign="top">5 (4.6)</td><td align="left" valign="top">15 (7.9)</td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top">Diagnosed diabetes<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup>, n (%)</td><td align="left" valign="top">108 (100)</td><td align="left" valign="top">123 (64.1)</td><td align="left" valign="top">20 (100)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>For 0&#x2010;15 y</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">35 (18.3)</td><td align="left" valign="top">5 (25)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>For 15+ y</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">88 (45.8)</td><td align="left" valign="top">15 (75)</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>Not available.</p></fn><fn id="table1fn2"><p><sup>b</sup>A post hoc analysis showed 39 (36.1%) participants with mild diabetic retinopathy, 40 (37%) with moderate diabetic retinopathy, and none with severe diabetic retinopathy; 31 (28.7%) had proliferative diabetic retinopathy (based on image evaluation obtained with the reference camera).</p></fn></table-wrap-foot></table-wrap><p>The proportion of images of sufficient quality 
for clinical interpretation was 0.985 (203/206) and 0.971 (200/206) for the VNRC and Crystalvue NFC-700 cameras, respectively. The difference in proportion was 0.015 (95% CI &#x2013;0.007 to 0.033; <xref ref-type="table" rid="table2">Table 2</xref>, examples in <xref ref-type="fig" rid="figure4">Figure 4</xref>).</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Comparative performance results.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">N=108 (K=206 images)</td><td align="left" valign="bottom">Investigational VNRC<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup> and reference Crystalvue</td></tr></thead><tbody><tr><td align="left" valign="top">Images with sufficient quality for clinical interpretation<sup><xref ref-type="table-fn" rid="table2fn2">b</xref></sup>, k (%)</td><td align="left" valign="top">203/206 (98.5) and 200/206 (97.1)</td></tr><tr><td align="left" valign="top">Difference in proportion (95% CI)</td><td align="left" valign="top">0.015 (&#x2013;0.007 to 0.033)</td></tr><tr><td align="left" valign="top">Uncaptured images, k (n)</td><td align="left" valign="top">3 (2) and 42 (25)</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>VNRC: Verily Numetric Retinal Camera.</p></fn><fn id="table2fn2"><p><sup>b</sup>Some of the reasons that the images were of insufficient quality: eyelash artifacts, low contrast, or extended areas where detail was lost.</p></fn></table-wrap-foot></table-wrap><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Examples of images with or without sufficient quality for clinical interpretation from each of the cameras in the comparison. 
VNRC: Verily Numetric Retinal Camera.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v9i1e70331_fig04.png"/></fig><p>Graders also computed the quality of the images across several specific criteria, for both the VNRC and the reference camera (Tables S1 and S2 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>), indicating whether the quality of specific image aspects was sufficient for clinical interpretation (note that these were not pooled for a single majority classification; there were 3 separate adjudications for each factor in each image). Graders evaluated the visualization of the optic disc and determined that it was adequate in a majority of images for both devices (in at least 90%, 225/248 of images by the VNRC and in at least 93%, 195/209 of the reference). There were similar results in the classification of macula visualization (deemed appropriate in at least 92%, 229/248 of VNRC images and 83%, 175/209 of reference images), and the visualization of the retinal vessels (appropriate in at least 94%, 233/248 of the VNRC images and 92%, 194/209 of reference images). 
Graders also found a majority of images to be of sufficient quality regarding key imaging features, such as adequate focus (in at least 91%, 228/248 of VNRC images and 88%, 185/209 of reference images), appropriate brightness (in at least 94%, 213/248 and 198/209 of images, for both), adequate field of view (in at least 91%, 228/248 and 191/209 of images, for both), no significant image defects (in at least 91%, 227/248 and 92%, 194/209), no small pupil interference (at least 89%, 222/248 and 87%, 183/209), and no ocular media opacity (in at least 73%, 183/248 and 85%, 178/209 of VNRC and reference images, respectively).</p></sec><sec id="s3-2"><title>Performance of the IQCF</title><p>We found a moderate association (<italic>&#x03C6;</italic>=0.58) between ophthalmologists&#x2019; assessments of a retinal image&#x2019;s gradeability and the IQCF algorithm&#x2019;s scoring of capture status (<xref ref-type="table" rid="table3">Table 3</xref>).</p><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Contingency table of image quality control factor scores of image quality and gradeability assessments determined by ophthalmology graders.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">IQCF<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup> score (N=172; K=343 images)</td><td align="left" valign="top" colspan="2">Graders&#x2019; rating, n (%)</td><td align="left" valign="top">Total</td></tr><tr><td align="left" valign="bottom"/><td align="left" valign="top">Nongradable</td><td align="left" valign="top">Gradable</td><td align="left" valign="bottom"/></tr></thead><tbody><tr><td align="left" valign="top">&#x2003;Recapture needed (IQCF=not pass)</td><td align="char" char="." valign="top">20 (40<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup>)</td><td align="char" char="." valign="top">30 (60)</td><td align="char" char="." 
valign="top">50</td></tr><tr><td align="left" valign="top">&#x2003;Recapture not needed (IQCF=pass)</td><td align="char" char="." valign="top">1 (0.3)</td><td align="char" char="." valign="top">292 (99.7)</td><td align="char" char="." valign="top">293</td></tr></tbody></table><table-wrap-foot><fn id="table3fn1"><p><sup>a</sup>IQCF: image quality control feature.</p></fn><fn id="table3fn2"><p><sup>b</sup>Percentages reflect the total number of image quality control feature classifications that were determined to be nongradable or gradable during human assessment (ie, graders&#x2019;).</p></fn></table-wrap-foot></table-wrap><p>Overall, the IQCF scored 50 images as needing recapture, but the human assessment was &#x201C;gradable&#x201D; for 60% (30/50) of these. Conversely, the vast majority of the images scored by the IQCF as not needing recapture, 292 out of 293 (99.7%), were found &#x201C;gradable&#x201D; by human assessment. Refer to <xref ref-type="fig" rid="figure5">Figure 5</xref> for examples of images with concordant and discordant IQCF scoring and human assessment.</p><fig position="float" id="figure5"><label>Figure 5.</label><caption><p>Example images of different image quality control feature scores and human assessments. (<bold>A</bold>) Image quality control feature &#x201C;pass&#x201D; with acceptable image quality, but ungradable due to optic disc shift. (<bold>B</bold>) Image quality control feature &#x201C;not pass&#x201D; with poor image quality, but gradable due to visible lesions. (<bold>C</bold>) Image quality control feature &#x201C;pass&#x201D; and gradable. 
(<bold>D</bold>) Image quality control feature &#x201C;not pass&#x201D; and ungradable, due to eyelash artifacts.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v9i1e70331_fig05.png"/></fig></sec><sec id="s3-3"><title>User Research Study</title><p>There was agreement among 100% (15/15) of the participating users in the simulated clinic environment that they were able to have both eyes screened easily. In addition, they somewhat agreed or strongly agreed (rating of 5 to 7 on a 1&#x2010;7 Likert scale) with statements indicating that they were confident in knowing how to complete the screening after watching the video, that they found it intuitive to set themselves up with the camera properly, felt comfortable while completing the screening, and had a positive experience with the camera (<xref ref-type="table" rid="table4">Table 4</xref>, Table S3 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>).</p><table-wrap id="t4" position="float"><label>Table 4.</label><caption><p>Summary questionnaire results for simulated users (no health care training).</p></caption><table id="table4" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Scale and question</td><td align="left" valign="bottom">Users (n=15), n (%)</td></tr></thead><tbody><tr><td align="left" valign="top">Likert scales (range 1-7)<sup><xref ref-type="table-fn" rid="table4fn1">a</xref></sup>, responses&#x003E;4</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Q1: I felt confident that I knew how to do the screening after watching the video</td><td align="left" valign="top">15 (100)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Q2: I found it intuitive to get myself set-up with the camera 
properly</td><td align="left" valign="top">15 (100)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Q3: I felt comfortable when doing the screening</td><td align="left" valign="top">15 (100)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Q4: I had a positive experience using the camera</td><td align="left" valign="top">15 (100)</td></tr><tr><td align="left" valign="top">Binary (yes or no), response &#x201C;yes&#x201D;</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Q5: I had both my eyes screened easily</td><td align="left" valign="top">15 (100)</td></tr></tbody></table><table-wrap-foot><fn id="table4fn1"><p><sup>a</sup>Likert scale, where 1=strongly disagree and 7=strongly agree.</p></fn></table-wrap-foot></table-wrap><p>All participating operators under simulated conditions (30/30, 100%), who had either health care degrees, licenses, or some training somewhat agreed or strongly agreed (ratings of 5 to 7 on a 1&#x2010;7 Likert scale) with statements indicating that they felt comfortable completing the screening, had a positive experience while using the camera, found training easy and useful to understand, found it easy to capture a retinal image with the camera, and found the camera easy to clean (<xref ref-type="table" rid="table5">Table 5</xref>, and Table S4 and S5 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>). 
A majority (23/30) felt that hands-on help for users was probably not needed, and approximately half (14/30) felt that they would not have to apply their relevant clinical training to complete specific tasks.</p><table-wrap id="t5" position="float"><label>Table 5.</label><caption><p>Summary questionnaire results for simulated operators (with health care licenses or training).</p></caption><table id="table5" frame="hsides" rules="groups"><thead><tr><td align="left" valign="top" rowspan="2">Questions on Likert scale (range 1-7)<sup><xref ref-type="table-fn" rid="table5fn1">a</xref></sup></td><td align="left" valign="top" colspan="3">Participants with responses &#x003E;4, n (%)</td></tr><tr><td align="left" valign="bottom">Health care degree or license (n=15)</td><td align="left" valign="bottom">Health care training (n=15)</td><td align="left" valign="bottom">Pooled (n=30)</td></tr></thead><tbody><tr><td align="left" valign="top">Q1: I felt comfortable when doing the screening</td><td align="left" valign="top">15 (100)</td><td align="left" valign="top">15 (100)</td><td align="left" valign="top">30 (100)</td></tr><tr><td align="left" valign="top">Q2: I feel like I needed to provide hands on help for the patient</td><td align="left" valign="top">1 (7)</td><td align="left" valign="top">6 (40)</td><td align="left" valign="top">7 (23)</td></tr><tr><td align="left" valign="top">Q3: I had a positive experience using the camera</td><td align="left" valign="top">15 (100)</td><td align="left" valign="top">15 (100)</td><td align="left" valign="top">30 (100)</td></tr><tr><td align="left" valign="top">Q4: I found the training easy to understand and useful</td><td align="left" valign="top">15 (100)</td><td align="left" valign="top">15 (100)</td><td align="left" valign="top">30 (100)</td></tr><tr><td align="left" valign="top">Q5: I found it easy to capture a retinal image with the camera</td><td align="left" valign="top">15 (100)</td><td align="left" valign="top">15 (100)</td><td 
align="left" valign="top">30 (100)</td></tr><tr><td align="left" valign="top">Q6: I had to apply my relevant clinical training to complete specific tasks</td><td align="left" valign="top">8 (53)</td><td align="left" valign="top">8 (53)</td><td align="left" valign="top">16 (53)</td></tr><tr><td align="left" valign="top">Q7: I found the camera user interface intuitive and easy to understand</td><td align="left" valign="top">15 (100)</td><td align="left" valign="top">14 (93)</td><td align="left" valign="top">29 (97)</td></tr><tr><td align="left" valign="top">Q8: I found the camera easy to clean</td><td align="left" valign="top">15 (100)</td><td align="left" valign="top">15 (100)</td><td align="left" valign="top">30 (100)</td></tr></tbody></table><table-wrap-foot><fn id="table5fn1"><p><sup>a</sup>Likert scale, where 1=strongly disagree and 7=strongly agree.</p></fn></table-wrap-foot></table-wrap><p>Most of the issues reported by users related to involuntary clicks back and forth in the user interface, particularly during the &#x201C;focus image&#x201D; steps. These issues, at most, caused delays (not failures) in the image capture process.</p></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>We report here the results of a series of analyses to characterize the performance and usability of a new retinal camera system aimed for implementation in primary care settings. The performance was satisfactory in 2 main aspects. First, the VNRC performed at comparable levels to a reference camera for the generation of quality retinal images; the numeric difference in the percentage proportion of images with quality to be clinically interpretable was low (0.015), and the CI for that difference straddled 0, indicating a likelihood that there was no difference between the 2 devices. 
Further reinforcing the main results, gradability across a variety of image quality metrics appeared numerically similar across VNRC and the reference camera. Second, the quality-scoring outputs of the quality control algorithm embedded in the system showed a moderate association (<italic>&#x03C6;</italic>=0.58) with the classification of images as &#x201C;gradable&#x201D; or &#x201C;ungradable&#x201D; by human graders. Furthermore, operators and users (ie, individuals with and without previous health care training) found the system to be generally intuitive and approachable to use, allowing them to feel comfortable performing image captures on their own (at least 95%, 44/45 of survey participants agreed to the corresponding statements).</p><p>Our findings indicate that VNRC can perform at the level of a standard tabletop retinal camera system. Thus, the VNRC may provide a balance of features that could mitigate some of the reservations from primary care providers to the adoption of DR screening programs. Across both primary care and specialty clinics, tabletop equipment may be the highest quality option for retinal imaging [<xref ref-type="bibr" rid="ref27">27</xref>]; however, high direct costs and the resource and space demands of optimal installation (ie, dedicated darkened room) act as deterrents, particularly in underresourced environments. While handheld and smartphone-based cameras overcome these aspects, reports are mixed regarding the quality of their image capture and their ease of successful use for DR screening (for instance, the skill and finesse required to obtain proper eye alignment [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref28">28</xref>], particularly when imaging is performed without pharmacologic pupil dilation [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref29">29</xref>-<xref ref-type="bibr" rid="ref31">31</xref>]). 
Most of the images excluded from the comparative analysis (for lack of bilateral counterparts) were in the reference camera group (probably due to the inability of that camera to capture images for pupil sizes below 3.5 mm). The rates of ungradable images we observed are encouragingly low, considering that other studies with mydriatic devices (where they would be expected to be lower) have reported approximately 6% [<xref ref-type="bibr" rid="ref32">32</xref>], or ranges from 0% to approximately 28% with mydriatic cameras (when grading was done via algorithm) [<xref ref-type="bibr" rid="ref33">33</xref>]. A possible downside of producing a high rate of ungradable images (which can be due to a variety of causes, such as small pupil size, eye pathologies) may be an inflation of referrals that can overwhelm downstream eye care services and reduce the overall cost-effectiveness of teleophthalmology programs [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref30">30</xref>]. The VNRC has advantages closely associated with handheld systems in cost, maneuverability, relatively low weight (10&#x2010;20 kg lighter than other tabletop systems), and relatively low space demands. Our results suggest that this system can produce images of quality comparable with expensive and more sophisticated tabletop equipment, overall and across specific image metrics, possibly reaching a balance between operational requirements and quality performance, which is particularly well-suited to primary care clinics.</p><p>Our results also indicate that the quality control feature within the VNRC system, the IQCF, functions as intended and could effectively filter out the intake of undesirable, poor-quality images into practical clinical workflows. 
Capturing retinal images of quality is a necessary requisite for the subsequent clinical utility of those images during diagnosis; entering a high proportion of low-quality images could render a clinical workflow inefficient and impair effective care. The approach presented in this work was to embed an algorithm to score image quality in real time, prompting users to discard low-quality images (those below a score threshold) and recapture before entering an image into the clinical workflow. Thus, it was important to establish the correlation between the proportion of retinal images passing the VNRC&#x2019;s IQCF threshold and downstream assessments of gradability according to ophthalmic graders. We found a moderate correlation, as nearly all the images cleared by the IQCF as &#x201C;not needing recapture&#x201D; were indeed found &#x201C;gradable&#x201D; by human assessment. The IQCF scored noticeably more images as &#x201C;needing recapture&#x201D; than human graders deemed &#x201C;ungradable&#x201D; (ie, 60%, 30/50 of images that IQCF labeled as needing recapture were actually deemed gradable during human assessment). This is probably due to the fact that the IQCF is a visibility measure, with an output that is pathology-independent; in contrast, human graders may grade images if a lesion is clearly identifiable, even in a context of low overall quality or visibility that the IQCF probably would &#x201C;not pass.&#x201D; This excess of images with &#x201C;recapture needed&#x201D; scores, however, may not represent a major practical problem, since image recapture in real time may not be a burdensome procedure. Ultimately, this ensures that the IQCF facilitates an inflow of quality images, without erring in a direction that would create backflows or inefficiencies, impairing practical clinic workflows after the fact.</p><p>Our third major result showed that participants found that handling the VNRC system was easy and intuitive and felt comfortable with it. 
This addresses another relevant concern for primary care practice managers considering retinal screening systems, namely, the actual or perceived need for dedicated trained staff. Furthermore, systems with which users and operators experience repeated lack of success in producing images of sufficient quality may undermine confidence. In turn, this can nudge personnel toward lower usage [<xref ref-type="bibr" rid="ref18">18</xref>] and depress the cost-effectiveness of a screening program. Our survey results indicate that the VNRC could be an approachable system that mitigates usability barriers.</p><p>While our analyses yielded promising results, the characterization of this new DR screening system had some limitations, largely related to the generalizability of our findings. First, the images for the comparative performance analysis were collected first with the VNRC, followed by the reference camera, in order to maintain internal consistency with the larger study from which this retrospective analysis was undertaken [<xref ref-type="bibr" rid="ref26">26</xref>]. Therefore, we cannot discount the possibility of experimental bias related to the order of measurement (it could be a learning effect or a fatigue effect) that could have influenced the results. Second, our analyzable dataset for the comparative analysis images excluded participants without complete bilateral, nonmydriatic image sets and annotations, which could have exerted some selection bias toward better-quality cases; while that effect would be expected to some extent for both groups in the comparison, it may have had a differential effect that we cannot discount. Third, we cannot interpret the potential impact of not-captured images in our comparative performance analysis. Some of those noncaptures may correspond to smaller pupil sizes, since the instructions for use for Crystalvue NFC-700 require a minimum pupil size of 4 mm. 
Therefore, different study conditions (eg, dimming ambient light in order to dilate pupil for an attempted recapture) may yield different comparative results from the ones in this report. Fourth, the VNRC IQCF would encourage image recaptures for low-quality scores in an actual clinic setting, until the operator has a total of 3 eye images or one of the images scores above the threshold. In this study, graders received image captures according to a prespecified sequence that may not have consistently maximized quality. Therefore, future studies are warranted to investigate performance across a more diverse set of conditions. For instance, future postmarket analyses could evaluate performance relying on the images with the top IQCF score across all eye captures, or in different environmental setups (such as lighting), to better reflect performance in real-clinic conditions. Another area for future study is the characterization of variability in larger cohorts, more widely representative of primary-care screening populations. Importantly, disease characteristics (burden and type of disease) may impact the relative performance in terms of gradeability and the association of image quality with gradeability. Finally, we did not collect information on visual acuity from the participants in the user study; while results were overall favorable, visual acuity (or lack thereof) may affect user-device interactions; therefore, it will be worthwhile to gather appropriate information in future studies to better understand it.</p></sec><sec id="s4-2"><title>Conclusions</title><p>In summary, barriers to primary care-based DR screening overall are multifactorial [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref35">35</xref>]. 
Providers tend to perceive that rigorous and cost-effective implementation of teleophthalmology in primary care settings is expensive and difficult, demanding restructuring of work processes and increasing the burden on clinic staff [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]. Our results indicate that the VNRC system could mitigate some of these issues, particularly in underresourced environments. It has the positive operational characteristics akin to handheld systems, produces images of quality comparable with standard tabletop retinal cameras, and is able to optimize the inflow of quality images into clinical workflows. In addition, users note that they are able to handle the system and produce usable images with ease. Furthermore, the transfer and flow of the digital output are adaptable to typical primary care workflows. These results support considering this system as an integrated end-to-end retinal service suitable for primary care and warrant additional studies across a wider and diverse set of primary care clinics. Novel DR screening systems that address primary care adoption barriers may represent an advance toward more widespread access, with the potential to curtail rates of severe disease progression at the population level and ultimately contribute to better patient outcomes.</p></sec></sec></body><back><ack><p>The Retrospective Retina Image Analysis Study and this analysis were funded by Verily Life Sciences, San Bruno, California.</p><p>Verily Life Sciences is the funding source for this study and is responsible for data collection. Authors were fully responsible for the data analysis, interpretation presented herein, and the writing of this article. AP, CL, and MS had access to the raw data. 
Authors had access to the full dataset for the study, reviewed, and approved the final manuscript for submission.</p></ack><notes><sec><title>Data Availability</title><p>The raw data in this analysis are not available for sharing.</p></sec></notes><fn-group><fn fn-type="con"><p>Study concept and design were contributed by CL, SK, and AP. Data collection was handled by Verily Life Sciences. Data analysis and interpretation were contributed by AP, CL, and SK. All authors contributed to draft writing and review and draft approval for submission. All authors wish to acknowledge writing and editing support from Julia Saiz from Verily Life Sciences.</p></fn><fn fn-type="conflict"><p>All authors report employment in Verily Life Sciences. MS, CL, AP, YC, EG, HW, BL, AW, NS, LY, IA, and SK report equity ownership in Verily Life Sciences.</p><p>The Verily Retinal Camera or Verily Numetric Retinal Camera described in this report is a product for research use only, not for use in diagnostic procedures.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">DM</term><def><p>diabetes mellitus</p></def></def-item><def-item><term id="abb2">DR</term><def><p>diabetic retinopathy</p></def></def-item><def-item><term id="abb3">fps</term><def><p>frames per second</p></def></def-item><def-item><term id="abb4">IQCF</term><def><p>image quality control feature</p></def></def-item><def-item><term id="abb5">IRB</term><def><p>institutional review board</p></def></def-item><def-item><term id="abb6">VNRC</term><def><p>Verily Numetric Retinal Camera</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="report"><article-title>National diabetes statistics report</article-title><year>2024</year><month>05</month><day>15</day><access-date>2024-11-18</access-date><publisher-name>Centers for Disease Control and Prevention</publisher-name><comment><ext-link ext-link-type="uri" 
xlink:href="https://www.cdc.gov/diabetes/php/data-research/?CDC_AAref_Val=https://www.cdc.gov/diabetes/pdfs/data/statistics/national-diabetes-statistics-report.pdf">https://www.cdc.gov/diabetes/php/data-research/?CDC_AAref_Val=https://www.cdc.gov/diabetes/pdfs/data/statistics/national-diabetes-statistics-report.pdf</ext-link></comment></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Parker</surname><given-names>ED</given-names> </name><name name-style="western"><surname>Lin</surname><given-names>J</given-names> </name><name name-style="western"><surname>Mahoney</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Economic costs of diabetes in the U.S. in 2022</article-title><source>Diabetes Care</source><year>2024</year><month>01</month><day>1</day><volume>47</volume><issue>1</issue><fpage>26</fpage><lpage>43</lpage><pub-id pub-id-type="doi">10.2337/dci23-0085</pub-id><pub-id pub-id-type="medline">37909353</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Duh</surname><given-names>EJ</given-names> </name><name name-style="western"><surname>Sun</surname><given-names>JK</given-names> </name><name name-style="western"><surname>Stitt</surname><given-names>AW</given-names> </name></person-group><article-title>Diabetic retinopathy: current understanding, mechanisms, and treatment strategies</article-title><source>JCI Insight</source><year>2017</year><month>07</month><day>20</day><volume>2</volume><issue>14</issue><fpage>e93751</fpage><pub-id pub-id-type="doi">10.1172/jci.insight.93751</pub-id><pub-id pub-id-type="medline">28724805</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Markle</surname><given-names>J</given-names> </name><name name-style="western"><surname>Shaia</surname><given-names>JK</given-names> </name><name name-style="western"><surname>Araich</surname><given-names>H</given-names> </name><name name-style="western"><surname>Sharma</surname><given-names>N</given-names> </name><name name-style="western"><surname>Talcott</surname><given-names>KE</given-names> </name><name name-style="western"><surname>Singh</surname><given-names>RP</given-names> </name></person-group><article-title>Longitudinal trends and disparities in diabetic retinopathy within an aggregate health care network</article-title><source>JAMA Ophthalmol</source><year>2024</year><month>07</month><day>1</day><volume>142</volume><issue>7</issue><fpage>599</fpage><lpage>606</lpage><pub-id pub-id-type="doi">10.1001/jamaophthalmol.2024.0046</pub-id><pub-id pub-id-type="medline">38869883</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lundeen</surname><given-names>EA</given-names> </name><name name-style="western"><surname>Burke-Conte</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Rein</surname><given-names>DB</given-names> </name><etal/></person-group><article-title>Prevalence of diabetic retinopathy in the US in 2021</article-title><source>JAMA Ophthalmol</source><year>2023</year><month>08</month><day>1</day><volume>141</volume><issue>8</issue><fpage>747</fpage><lpage>754</lpage><pub-id pub-id-type="doi">10.1001/jamaophthalmol.2023.2289</pub-id><pub-id pub-id-type="medline">37318810</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><collab>American Diabetes Association Professional Practice Committee</collab></person-group><article-title>12. 
Retinopathy, neuropathy, and foot care: standards of care in diabetes-2024</article-title><source>Diabetes Care</source><year>2024</year><month>01</month><day>1</day><volume>47</volume><issue>Suppl 1</issue><fpage>S231</fpage><lpage>S243</lpage><pub-id pub-id-type="doi">10.2337/dc24-S012</pub-id><pub-id pub-id-type="medline">38078577</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chou</surname><given-names>CF</given-names> </name><name name-style="western"><surname>Sherrod</surname><given-names>CE</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>X</given-names> </name><etal/></person-group><article-title>Barriers to eye care among people aged 40 years and older with diagnosed diabetes, 2006-2010</article-title><source>Diabetes Care</source><year>2014</year><volume>37</volume><issue>1</issue><fpage>180</fpage><lpage>188</lpage><pub-id pub-id-type="doi">10.2337/dc13-1507</pub-id><pub-id pub-id-type="medline">24009300</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hartnett</surname><given-names>ME</given-names> </name><name name-style="western"><surname>Key</surname><given-names>IJ</given-names> </name><name name-style="western"><surname>Loyacano</surname><given-names>NM</given-names> </name><name name-style="western"><surname>Horswell</surname><given-names>RL</given-names> </name><name name-style="western"><surname>Desalvo</surname><given-names>KB</given-names> </name></person-group><article-title>Perceived barriers to diabetic eye care: qualitative study of patients and physicians</article-title><source>Arch Ophthalmol</source><year>2005</year><month>03</month><volume>123</volume><issue>3</issue><fpage>387</fpage><lpage>391</lpage><pub-id 
pub-id-type="doi">10.1001/archopht.123.3.387</pub-id><pub-id pub-id-type="medline">15767483</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chan</surname><given-names>AX</given-names> </name><name name-style="western"><surname>McDermott Iv</surname><given-names>JJ</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>TC</given-names> </name><etal/></person-group><article-title>Associations between healthcare utilization and access and diabetic retinopathy complications using All of Us nationwide survey data</article-title><source>PLoS ONE</source><year>2022</year><volume>17</volume><issue>6</issue><fpage>e0269231</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0269231</pub-id><pub-id pub-id-type="medline">35704625</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Daskivich</surname><given-names>LP</given-names> </name><name name-style="western"><surname>Vasquez</surname><given-names>C</given-names> </name><name name-style="western"><surname>Martinez</surname><given-names>C</given-names> </name><name name-style="western"><surname>Tseng</surname><given-names>CH</given-names> </name><name name-style="western"><surname>Mangione</surname><given-names>CM</given-names> </name></person-group><article-title>Implementation and evaluation of a large-scale teleretinal diabetic retinopathy screening program in the Los Angeles County Department of Health Services</article-title><source>JAMA Intern Med</source><year>2017</year><month>05</month><day>1</day><volume>177</volume><issue>5</issue><fpage>642</fpage><lpage>649</lpage><pub-id pub-id-type="doi">10.1001/jamainternmed.2017.0204</pub-id><pub-id pub-id-type="medline">28346590</pub-id></nlm-citation></ref><ref 
id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Scanlon</surname><given-names>PH</given-names> </name></person-group><article-title>The English National Screening Programme for diabetic retinopathy 2003-2016</article-title><source>Acta Diabetol</source><year>2017</year><month>06</month><volume>54</volume><issue>6</issue><fpage>515</fpage><lpage>525</lpage><pub-id pub-id-type="doi">10.1007/s00592-017-0974-1</pub-id><pub-id pub-id-type="medline">28224275</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Malerbi</surname><given-names>FK</given-names> </name><name name-style="western"><surname>Morales</surname><given-names>PH</given-names> </name><name name-style="western"><surname>Farah</surname><given-names>ME</given-names> </name><etal/></person-group><article-title>Comparison between binocular indirect ophthalmoscopy and digital retinography for diabetic retinopathy screening: the multicenter Brazilian type 1 diabetes study</article-title><source>Diabetol Metab Syndr</source><year>2015</year><volume>7</volume><issue>1</issue><fpage>116</fpage><pub-id pub-id-type="doi">10.1186/s13098-015-0110-8</pub-id><pub-id pub-id-type="medline">26697120</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Salongcay</surname><given-names>RP</given-names> </name><name name-style="western"><surname>Aquino</surname><given-names>LAC</given-names> </name><name name-style="western"><surname>Salva</surname><given-names>CMG</given-names> </name><etal/></person-group><article-title>Comparison of handheld retinal imaging with ETDRS 7-standard field photography for diabetic retinopathy and diabetic macular edema</article-title><source>Ophthalmol 
Retina</source><year>2022</year><month>07</month><volume>6</volume><issue>7</issue><fpage>548</fpage><lpage>556</lpage><pub-id pub-id-type="doi">10.1016/j.oret.2022.03.002</pub-id><pub-id pub-id-type="medline">35278726</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jalkiewicz</surname><given-names>JF</given-names> </name></person-group><article-title>A screening success. NYC Health+Hospitals takes diabetic retinopathy screening to new heights</article-title><source>Ophthalmol Management</source><year>2022</year><access-date>2024-12-06</access-date><fpage>46</fpage><lpage>47</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://ophthalmologymanagement.com/issues/2022/july/a-screening-success/">https://ophthalmologymanagement.com/issues/2022/july/a-screening-success/</ext-link></comment></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fong</surname><given-names>DS</given-names> </name><name name-style="western"><surname>Gottlieb</surname><given-names>J</given-names> </name><name name-style="western"><surname>Ferris</surname><given-names>FL</given-names> </name><name name-style="western"><surname>Klein</surname><given-names>R</given-names> </name></person-group><article-title>Understanding the value of diabetic retinopathy screening</article-title><source>Arch Ophthalmol</source><year>2001</year><month>05</month><volume>119</volume><issue>5</issue><fpage>758</fpage><lpage>760</lpage><pub-id pub-id-type="doi">10.1001/archopht.119.5.758</pub-id><pub-id pub-id-type="medline">11346406</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hudson</surname><given-names>SM</given-names> 
</name><name name-style="western"><surname>Modjtahedi</surname><given-names>BS</given-names> </name><name name-style="western"><surname>Altman</surname><given-names>D</given-names> </name><name name-style="western"><surname>Jimenez</surname><given-names>JJ</given-names> </name><name name-style="western"><surname>Luong</surname><given-names>TQ</given-names> </name><name name-style="western"><surname>Fong</surname><given-names>DS</given-names> </name></person-group><article-title>Factors affecting compliance with diabetic retinopathy screening: a qualitative study comparing English and Spanish speakers</article-title><source>Clin Ophthalmol</source><year>2022</year><volume>16</volume><issue>16</issue><fpage>1009</fpage><lpage>1018</lpage><pub-id pub-id-type="doi">10.2147/OPTH.S342965</pub-id><pub-id pub-id-type="medline">35400992</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Zupan</surname><given-names>NJ</given-names> </name><name name-style="western"><surname>Shiyanbola</surname><given-names>OO</given-names> </name><etal/></person-group><article-title>Factors influencing patient adherence with diabetic eye screening in rural communities: a qualitative study</article-title><source>PLoS ONE</source><year>2018</year><volume>13</volume><issue>11</issue><fpage>e0206742</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0206742</pub-id><pub-id pub-id-type="medline">30388172</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Torres Diaz</surname><given-names>A</given-names> </name><name 
name-style="western"><surname>Benkert</surname><given-names>R</given-names> </name></person-group><article-title>Scaling up teleophthalmology for diabetic eye screening: opportunities for widespread implementation in the USA</article-title><source>Curr Diab Rep</source><year>2019</year><month>08</month><day>2</day><volume>19</volume><issue>9</issue><fpage>74</fpage><pub-id pub-id-type="doi">10.1007/s11892-019-1187-5</pub-id><pub-id pub-id-type="medline">31375932</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="web"><article-title>NFC-700 fully automated non-mydriatic retinal camera</article-title><source>Crystalvue</source><access-date>2024-11-18</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.crystalvue.com.tw/data/files/1623122722411841591.pdf">https://www.crystalvue.com.tw/data/files/1623122722411841591.pdf</ext-link></comment></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="report"><article-title>510(k) summary</article-title><year>2019</year><month>01</month><day>2</day><access-date>2024-11-18</access-date><publisher-name>US Food and Drug Administration</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://www.accessdata.fda.gov/cdrh_docs/pdf18/K182199.pdf">https://www.accessdata.fda.gov/cdrh_docs/pdf18/K182199.pdf</ext-link></comment></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>DeHoog</surname><given-names>E</given-names> </name><name name-style="western"><surname>Schwiegerling</surname><given-names>J</given-names> </name></person-group><article-title>Fundus camera systems: a comparative analysis</article-title><source>Appl Opt</source><year>2009</year><month>01</month><day>10</day><volume>48</volume><issue>2</issue><fpage>221</fpage><lpage>228</lpage><pub-id pub-id-type="doi">10.1364/ao.48.000221</pub-id><pub-id 
pub-id-type="medline">19137032</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bista Karki</surname><given-names>S</given-names> </name><name name-style="western"><surname>Coppell</surname><given-names>KJ</given-names> </name><name name-style="western"><surname>Mitchell</surname><given-names>LV</given-names> </name><name name-style="western"><surname>Ogbuehi</surname><given-names>KC</given-names> </name></person-group><article-title>Dynamic pupillometry in type 2 diabetes: pupillary autonomic dysfunction and the severity of diabetic retinopathy</article-title><source>Clin Ophthalmol</source><year>2020</year><volume>14</volume><fpage>3923</fpage><lpage>3930</lpage><pub-id pub-id-type="doi">10.2147/OPTH.S279872</pub-id><pub-id pub-id-type="medline">33244218</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Kavusi</surname><given-names>S</given-names> </name><name name-style="western"><surname>El Gamal</surname><given-names>A</given-names> </name></person-group><article-title>Quantitative study of high-dynamic-range image sensor architectures</article-title><year>2004</year><month>06</month><day>7</day><conf-name>Sensors and Camera Systems for Scientific, Industrial, and Digital Photography Applications V</conf-name><conf-loc>San Jose, CA</conf-loc><publisher-name>SPIE</publisher-name><fpage>264</fpage><lpage>275</lpage><pub-id pub-id-type="doi">10.1117/12.544517</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sahu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Singh</surname><given-names>AK</given-names> </name><name 
name-style="western"><surname>Ghrera</surname><given-names>SP</given-names> </name><name name-style="western"><surname>Elhoseny</surname><given-names>M</given-names> </name></person-group><article-title>An approach for de-noising and contrast enhancement of retinal fundus image using CLAHE</article-title><source>Optics &#x0026; Laser Technology</source><year>2019</year><month>02</month><volume>110</volume><fpage>87</fpage><lpage>98</lpage><pub-id pub-id-type="doi">10.1016/j.optlastec.2018.06.061</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Youssif</surname><given-names>AA</given-names> </name><name name-style="western"><surname>Ghalwash</surname><given-names>AZ</given-names> </name><name name-style="western"><surname>Ghoneim</surname><given-names>AS</given-names> </name></person-group><article-title>Comparative study of contrast enhancement and illumination equalization methods for retinal vasculature segmentation</article-title><year>2006</year><access-date>2025-07-03</access-date><conf-name>Cairo International Biomedical Engineering Conference (CIBEC&#x2019;06)</conf-name><fpage>1</fpage><lpage>5</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://www.researchgate.net/publication/224014522_Comparative_Study_of_Contrast_Enhancement_and_Illumination_Equalization_Methods_for_Retinal_Vasculature_Segmentation">https://www.researchgate.net/publication/224014522_Comparative_Study_of_Contrast_Enhancement_and_Illumination_Equalization_Methods_for_Retinal_Vasculature_Segmentation</ext-link></comment></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Makedonsky</surname><given-names>K</given-names> </name><name name-style="western"><surname>Patwardhan</surname><given-names>A</given-names> </name><name 
name-style="western"><surname>Kim</surname><given-names>A</given-names> </name><name name-style="western"><surname>Silvestrini</surname><given-names>M</given-names> </name><name name-style="western"><surname>Lui</surname><given-names>C</given-names> </name><name name-style="western"><surname>Kavusi</surname><given-names>S</given-names> </name></person-group><article-title>Technical feasibility study for a retinal camera system</article-title><source>Endocrinology (including Diabetes Mellitus and Metabolic Disease)</source><comment>Preprint posted online on 2024</comment><pub-id pub-id-type="doi">10.1101/2024.12.20.24319469</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bascaran</surname><given-names>C</given-names> </name></person-group><article-title>Key considerations when choosing a retinal camera for diabetic retinopathy screening</article-title><source>Community Eye Health</source><year>2023</year><volume>36</volume><issue>119</issue><fpage>4</fpage><lpage>5</lpage><pub-id pub-id-type="medline">37600682</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cuadros</surname><given-names>J</given-names> </name><name name-style="western"><surname>Bresnick</surname><given-names>G</given-names> </name></person-group><article-title>Can commercially available handheld retinal cameras effectively screen diabetic retinopathy?</article-title><source>J Diabetes Sci Technol</source><year>2017</year><month>01</month><volume>11</volume><issue>1</issue><fpage>135</fpage><lpage>137</lpage><pub-id pub-id-type="doi">10.1177/1932296816682033</pub-id><pub-id pub-id-type="medline">28264174</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Leonard</surname><given-names>C</given-names> </name></person-group><article-title>Retinal imaging on the go</article-title><source>Rev Ophthalmol</source><year>2020</year><access-date>2024-12-06</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.reviewofophthalmology.com/article/retinal-imaging-on-the-go">https://www.reviewofophthalmology.com/article/retinal-imaging-on-the-go</ext-link></comment></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Widner</surname><given-names>K</given-names> </name><name name-style="western"><surname>Virmani</surname><given-names>S</given-names> </name><name name-style="western"><surname>Krause</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Lessons learned from translating AI from development to deployment in healthcare</article-title><source>Nat Med</source><year>2023</year><month>06</month><volume>29</volume><issue>6</issue><fpage>1304</fpage><lpage>1306</lpage><pub-id pub-id-type="doi">10.1038/s41591-023-02293-9</pub-id><pub-id pub-id-type="medline">37248297</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dow</surname><given-names>ER</given-names> </name><name name-style="western"><surname>Khan</surname><given-names>NC</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>KM</given-names> </name><etal/></person-group><article-title>AI-human hybrid workflow enhances teleophthalmology for the detection of diabetic retinopathy</article-title><source>Ophthalmol Sci</source><year>2023</year><month>12</month><volume>3</volume><issue>4</issue><fpage>100330</fpage><pub-id pub-id-type="doi">10.1016/j.xops.2023.100330</pub-id><pub-id 
pub-id-type="medline">37449051</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Aquino</surname><given-names>LAC</given-names> </name><name name-style="western"><surname>Salongcay</surname><given-names>RP</given-names> </name><name name-style="western"><surname>Alog</surname><given-names>GP</given-names> </name><etal/></person-group><article-title>Comparison of 2-field and 5-field mydriatic handheld retinal imaging in a community-based diabetic retinopathy screening program</article-title><source>Ophthalmologica</source><year>2023</year><volume>246</volume><issue>3-4</issue><fpage>203</fpage><lpage>208</lpage><pub-id pub-id-type="doi">10.1159/000530903</pub-id><pub-id pub-id-type="medline">37231995</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kubin</surname><given-names>AM</given-names> </name><name name-style="western"><surname>Huhtinen</surname><given-names>P</given-names> </name><name name-style="western"><surname>Ohtonen</surname><given-names>P</given-names> </name><name name-style="western"><surname>Keskitalo</surname><given-names>A</given-names> </name><name name-style="western"><surname>Wirkkala</surname><given-names>J</given-names> </name><name name-style="western"><surname>Hautala</surname><given-names>N</given-names> </name></person-group><article-title>Comparison of 21 artificial intelligence algorithms in automated diabetic retinopathy screening using handheld fundus camera</article-title><source>Ann Med</source><year>2024</year><month>12</month><volume>56</volume><issue>1</issue><fpage>2352018</fpage><pub-id pub-id-type="doi">10.1080/07853890.2024.2352018</pub-id><pub-id pub-id-type="medline">38738798</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Piyasena</surname><given-names>M</given-names> </name><name name-style="western"><surname>Murthy</surname><given-names>GVS</given-names> </name><name name-style="western"><surname>Yip</surname><given-names>JLY</given-names> </name><etal/></person-group><article-title>Systematic review on barriers and enablers for access to diabetic retinopathy screening services in different income settings</article-title><source>PLoS One</source><year>2019</year><volume>14</volume><issue>4</issue><fpage>e0198979</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0198979</pub-id><pub-id pub-id-type="medline">31013274</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Serpas</surname><given-names>L</given-names> </name><name name-style="western"><surname>Genter</surname><given-names>P</given-names> </name><name name-style="western"><surname>Anderson</surname><given-names>B</given-names> </name><name name-style="western"><surname>Campa</surname><given-names>D</given-names> </name><name name-style="western"><surname>Ipp</surname><given-names>E</given-names> </name></person-group><article-title>Divergent perceptions of barriers to diabetic retinopathy screening among patients and care providers, Los Angeles, California, 2014&#x2013;2015</article-title><source>Prev Chronic Dis</source><year>2014</year><volume>13</volume><issue>13</issue><pub-id pub-id-type="doi">10.5888/pcd13.160193</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Panwar</surname><given-names>N</given-names> </name><name 
name-style="western"><surname>Huang</surname><given-names>P</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Fundus photography in the 21st century--a review of recent technological advances and their implications for worldwide healthcare</article-title><source>Telemed J E Health</source><year>2016</year><month>03</month><volume>22</volume><issue>3</issue><fpage>198</fpage><lpage>208</lpage><pub-id pub-id-type="doi">10.1089/tmj.2015.0068</pub-id><pub-id pub-id-type="medline">26308281</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Additional methods and results.</p><media xlink:href="formative_v9i1e70331_app1.docx" xlink:title="DOCX File, 2492 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Consent form.</p><media xlink:href="formative_v9i1e70331_app2.pdf" xlink:title="PDF File, 185 KB"/></supplementary-material></app-group></back></article>