<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Form Res</journal-id><journal-id journal-id-type="publisher-id">formative</journal-id><journal-id journal-id-type="index">27</journal-id><journal-title>JMIR Formative Research</journal-title><abbrev-journal-title>JMIR Form Res</abbrev-journal-title><issn pub-type="epub">2561-326X</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v9i1e65836</article-id><article-id pub-id-type="doi">10.2196/65836</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Virtual Reality Gamification of Visual Search, Response Inhibition, and Visual Short-Term Memory Tasks for Cognitive Assessment: Experimental Study</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Hadjiaros</surname><given-names>Marios</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Shimi</surname><given-names>Andria</given-names></name><degrees>Prof Dr</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Neokleous</surname><given-names>Kleanthis</given-names></name><degrees>PhD</degrees><xref 
ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Pattichis</surname><given-names>Constantinos</given-names></name><degrees>Prof Dr</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Avraamides</surname><given-names>Marios</given-names></name><degrees>Prof Dr</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff4">4</xref></contrib></contrib-group><aff id="aff1"><institution>CYENS&#x2014;Centre of Excellence</institution><addr-line>1 Dimarchou Lellou Demetriadi</addr-line><addr-line>Nicosia</addr-line><country>Cyprus</country></aff><aff id="aff2"><institution>Department of Computer Science, University of Cyprus</institution><addr-line>Nicosia</addr-line><country>Cyprus</country></aff><aff id="aff3"><institution>Biomedical Engineering Research Centre, University of Cyprus</institution><addr-line>Nicosia</addr-line><country>Cyprus</country></aff><aff id="aff4"><institution>Department of Psychology, University of Cyprus</institution><addr-line>Nicosia</addr-line><country>Cyprus</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Mavragani</surname><given-names>Amaryllis</given-names></name></contrib><contrib contrib-type="editor"><name name-style="western"><surname>Riedemann</surname><given-names>Lars</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Concannon</surname><given-names>Brendan</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Costa</surname><given-names>Jose Ferrer</given-names></name></contrib><contrib contrib-type="reviewer"><name 
name-style="western"><surname>Mendes</surname><given-names>Liliana</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Marios Hadjiaros, PhD, CYENS&#x2014;Centre of Excellence, 1 Dimarchou Lellou Demetriadi, Nicosia, 1016, Cyprus, 357 99094970; <email>marios.hadjiaros@gmail.com</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>29</day><month>7</month><year>2025</year></pub-date><volume>9</volume><elocation-id>e65836</elocation-id><history><date date-type="received"><day>27</day><month>08</month><year>2024</year></date><date date-type="rev-recd"><day>05</day><month>05</month><year>2025</year></date><date date-type="accepted"><day>25</day><month>05</month><year>2025</year></date></history><copyright-statement>&#x00A9; Marios Hadjiaros, Andria Shimi, Kleanthis Neokleous, Constantinos Pattichis, Marios Avraamides. Originally published in JMIR Formative Research (<ext-link ext-link-type="uri" xlink:href="https://formative.jmir.org">https://formative.jmir.org</ext-link>), 29.7.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Formative Research, is properly cited. 
The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://formative.jmir.org">https://formative.jmir.org</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://formative.jmir.org/2025/1/e65836"/><abstract><sec><title>Background</title><p>Cognitive tasks are foundational tools in psychology and neuroscience for studying attention, perception, and memory. However, they typically employ simple or artificial stimuli and require numerous repetitive trials, which can adversely affect participant engagement and ecological validity.</p></sec><sec><title>Objective</title><p>This study investigated whether gamified versions of 3 established cognitive tasks, namely, the Visual Search task (attention), the Whack-the-Mole task (response inhibition), and the Corsi block-tapping test (visual short-term memory), replicate the typical patterns of results reported for their traditional counterparts. It also examined whether the method of administration&#x2014;in immersive virtual reality (VR) versus desktop computer, and in the laboratory versus at home&#x2014;influences performance.</p></sec><sec sec-type="methods"><title>Methods</title><p>Seventy-five participants (male=24, female=51; age range 18&#x2010;35 years; mean 23.15, SD 4.38 years) were randomly assigned to 1 of 3 administration conditions (n=25 each). In the VR-Lab condition, participants completed the tasks in immersive VR within the laboratory; in the Desktop-Lab condition, they completed the tasks on a 2D desktop screen in the laboratory; and in the Desktop-Remote condition, participants completed the tasks on their personal computers at home. 
All participants completed the same gamified tasks while seated, entering responses with either a mouse or a VR controller, depending on the condition.</p></sec><sec sec-type="results"><title>Results</title><p>The results obtained from these gamified tasks across all 3 administration conditions replicated the typical performance patterns observed with their traditional counterparts, despite using more ecologically valid stimuli and fewer trials. However, administration modality did influence certain performance measures, particularly reaction times (RTs) and task efficiency. Specifically, in the Visual Search task, RTs were significantly faster in the VR-Lab condition (mean 1.24 seconds) than in the Desktop-Lab (mean 1.49 seconds; <italic>P</italic>&#x003C;.001) and Desktop-Remote (mean 1.44 seconds; <italic>P</italic>=.008) conditions. In the Whack-the-Mole task, no significant group differences emerged in d&#x2019; scores (VR-Lab: mean 3.79, Desktop-Remote: mean 3.75, Desktop-Lab: mean 3.62; <italic>P</italic>=.49), but RTs were slower in the Desktop-Remote condition (mean 0.64 seconds) than in the VR-Lab (mean 0.41 seconds; <italic>P</italic>&#x003C;.001) and Desktop-Lab (mean 0.48 seconds; <italic>P</italic>&#x003C;.001) conditions. For the Corsi block-tapping test, no significant group differences in span scores were found (VR-Lab: mean 5.48, Desktop-Lab: mean 5.68, and Desktop-Remote: mean 5.24; <italic>P</italic>=.24). Finally, a significant positive correlation was observed between RTs for Hits in the Whack-the-Mole task and feature search trials in the Visual Search task (<italic>r</italic>=0.24; <italic>P</italic>=.04).</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Gamified cognitive tasks administered in VR replicated established behavioral patterns observed with their traditional versions while improving ecological validity and reducing task duration. 
Administration modality had limited effects on overall outcomes, although RTs were slower in remote settings. These findings support the feasibility of using gamified VR tasks for scalable and ecologically valid cognitive assessment. Overall, the study underscores the potential of VR to increase participant engagement and enrich cognitive research through more immersive and motivating testing environments.</p></sec></abstract><kwd-group><kwd>attention</kwd><kwd>memory</kwd><kwd>virtual reality</kwd><kwd>visuospatial</kwd><kwd>serious games</kwd><kwd>cognitive assessment</kwd><kwd>Corsi Test</kwd><kwd>Visual Search</kwd><kwd>Go/No-Go</kwd><kwd>response inhibition</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><sec id="s1-1"><title>Background</title><p>Cognitive tasks are commonly used in psychology and neuroscience to investigate various mental processes, such as attention, memory, and perception [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]. These tasks often involve simple stimuli, such as alphanumeric characters and shapes, and a large number of trials, a deliberate design choice aimed at controlling for extraneous variables and ensuring the reliability of the results [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref4">4</xref>]. While this methodological approach is crucial for maintaining experimental rigor and validity, it may inadvertently lead to issues regarding participant engagement [<xref ref-type="bibr" rid="ref5">5</xref>]. For example, the repetitive tasks and the simple stimuli may cause participant disinterest, boredom, and fatigue, which in turn may compromise data quality or introduce unintended biases [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref7">7</xref>]. 
Thus, while striving for methodological control, researchers must also consider strategies to enhance participant engagement to mitigate these potential challenges and maintain the integrity of their findings.</p></sec><sec id="s1-2"><title>Virtual Reality as a Tool for Gamified Cognitive Assessment</title><p>Virtual reality (VR) technology presents a promising avenue for addressing the engagement challenges inherent in traditional cognitive tasks [<xref ref-type="bibr" rid="ref8">8</xref>]. By leveraging VR, researchers can gamify tasks, transforming them into immersive and interactive experiences that are inherently more engaging for participants [<xref ref-type="bibr" rid="ref9">9</xref>].</p><p>Numerous studies have indicated that gamification in cognitive tasks can significantly increase participant engagement and motivation [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>]. Recent studies exploring VR gaming have emphasized the significant role of user engagement and motivation in enhancing the overall experience. One key factor driving VR game adoption is the concept of &#x201C;flow,&#x201D; which is a state of optimal immersion where players experience intense enjoyment and concentration. This &#x201C;flow&#x201D; state is essential for fostering deeper engagement with VR games, particularly as the player becomes more immersed in the virtual environment. 
The sense of spatial presence, or the feeling of being physically located within the VR world, is another major motivator, as it strengthens the emotional connection to the game and enhances relaxation or enjoyment [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref13">13</xref>].</p></sec><sec id="s1-3"><title>Cognitive Tasks Under Investigation</title><p>The Visual Search task is a widely used method for investigating the mechanisms of attention involved in locating a target among distractors, mirroring real-world scenarios such as finding a friend in a crowded cafeteria, searching for an item in the grocery store, or searching for a target object among nontarget objects during security screening. In laboratory settings, a typical Visual Search task involves presenting a target object on the computer screen among distractors [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref15">15</xref>]. Stimuli often consist of alphanumeric characters or simple geometric shapes. Visual search tasks typically involve 2 key manipulations: similarity and display size. Similarity refers to the type of search that is determined by how visually distinct the target is from the distractors. In <italic>feature search</italic> trials, the target differs from distractors in terms of a single feature, such as its shape (eg, locating the letter &#x201C;O&#x201D; among several &#x201C;Xs&#x201D;) or its color (eg, locating a red &#x201C;X&#x201D; among white &#x201C;Xs&#x201D;). In <italic>conjunction search</italic> trials, the target is defined by a combination of features that are also present in the distractors (eg, locating a green &#x201C;O&#x201D; among green &#x201C;Xs&#x201D; and red &#x201C;Os&#x201D;). The second manipulation is the display size, which refers to the number of distractors presented in each trial alongside the target [<xref ref-type="bibr" rid="ref16">16</xref>]. 
The typical pattern of results obtained across many studies is that (1) reaction times (RTs) (ie, the time it takes the participant to find the target from the onset of the display) are shorter for feature search trials than conjunction search trials, (2) RT increases significantly with display size, a phenomenon known as the &#x201C;display size effect,&#x201D; but only in conjunction search trials [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>], and (3) RT remains relatively constant across display sizes in feature search trials, a phenomenon known as the &#x201C;pop-out effect&#x201D; [<xref ref-type="bibr" rid="ref19">19</xref>].</p><p>The Whack-the-Mole task is a Go/No-Go task that provides a measure of response inhibition, that is, the ability to inhibit inappropriate prepotent responses [<xref ref-type="bibr" rid="ref20">20</xref>]. Past research has shown that response inhibition is a key ability for cognitive [<xref ref-type="bibr" rid="ref21">21</xref>] and psychosocial [<xref ref-type="bibr" rid="ref22">22</xref>] development and is a reliable predictor of school readiness and academic achievement in domains such as math and reading [<xref ref-type="bibr" rid="ref23">23</xref>]. Impairments in response inhibition are commonly observed in neurodevelopmental disorders such as autism [<xref ref-type="bibr" rid="ref24">24</xref>] and attention-deficit/hyperactivity disorder [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref26">26</xref>] as well as in psychiatric and neurological disorders such as borderline personality disorder [<xref ref-type="bibr" rid="ref27">27</xref>] and Parkinson disease [<xref ref-type="bibr" rid="ref28">28</xref>]. 
In computerized Go/No-Go tasks, participants are required to execute an action rapidly (eg, press a button) upon detecting a specific stimulus (Go trials) and withhold a response when presented with a different stimulus (No-Go trials) [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>]. To ensure task effectiveness, the majority of trials are typically Go trials (approximately 75%), priming the execution of a response [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>]. Response inhibition is assessed by computing d&#x2019;, a sensitivity metric that takes into account the proportion of hits (ie, correctly executed responses in Go trials) and false alarms (ie, incorrect execution of responses in No-Go trials) [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref29">29</xref>]. Although d&#x2019; may vary depending on the manipulations of the experiment, such as the discriminability of the stimuli, its typical value, indicating moderate sensitivity, is between 3 and 4 [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref30">30</xref>]. In general, participants exhibit false alarms in 10% of the No-Go trials [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>].</p><p>The Corsi Test is one of the most widely used tasks to assess visuospatial short-term memory. The original version [<xref ref-type="bibr" rid="ref31">31</xref>] consists of 9 physical cubes placed on a board. Each trial involves the experimenter sequentially touching a series of blocks from one side of the board, and the participant, situated on the opposite side, is required to reproduce the sequence by touching the blocks in the same order. 
If the participant successfully reproduces the sequence, then the number of blocks touched in the sequence is increased by 1 [<xref ref-type="bibr" rid="ref31">31</xref>-<xref ref-type="bibr" rid="ref33">33</xref>]. The task continues until the participant fails to accurately reproduce the sequence [<xref ref-type="bibr" rid="ref34">34</xref>]. The longest sequence that is reproduced correctly indexes the participant&#x2019;s visuospatial short-term memory span. Studies show that healthy adults have a typical span between 5 and 7 [<xref ref-type="bibr" rid="ref35">35</xref>]. In more recent implementations, electronic versions of the task have been developed and delivered through computers or mobile devices, where targets light up in a specific sequence that participants reproduce by clicking or touching the targets. Even in these modern versions of the task, the typical span ranges between 5 and 7 [<xref ref-type="bibr" rid="ref33">33</xref>].</p></sec><sec id="s1-4"><title>Related Work</title><p>Immersive VR limits extraneous distractions from the surrounding environment [<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref37">37</xref>]. This is because immersive VR setups entail wearing a head-mounted display (HMD) that effectively blocks out the external environment, thereby reducing potential environmental distractions that may interfere with task performance [<xref ref-type="bibr" rid="ref38">38</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]. This feature may be particularly important in cognitive research, where maintaining participants&#x2019; focus and attention is being assessed [<xref ref-type="bibr" rid="ref40">40</xref>] and a measure of &#x201C;true&#x201D; ability under no external distraction is needed. 
It is, therefore, imperative to ascertain whether these gamified tasks, administered with or without immersive VR, produce the same pattern of results as their traditional counterparts, an indication that they capture the same underlying cognitive processes. Recent studies have investigated the consistency and relative performance of computerized and VR-based cognitive tasks, revealing nuanced differences between the 2 modalities. These findings underscore the importance of task context and cognitive demands when evaluating their efficacy.</p><p>For example, Barrett et al [<xref ref-type="bibr" rid="ref41">41</xref>] compared VR, 3D desktop, and 2D versions of a category learning task in which participants classify visual stimuli into categories based on 2 relevant features while ignoring a third irrelevant one. Feedback is provided after each trial to support learning. RTs in the 2D condition were significantly faster than in the VR and 3D conditions, and fixation durations were shorter, indicating reduced cognitive load for the 2D environment. However, participants reported higher levels of immersion and engagement in the VR and 3D conditions, suggesting that while traditional computerized tasks may facilitate quicker responses, VR environments offer richer experiential benefits.</p><p>This trade-off between speed and immersion is also reflected in findings from Bhargava et al [<xref ref-type="bibr" rid="ref42">42</xref>], who examined cognitive task performance across VR and mobile-based conditions using custom-designed games. Bhargava et al [<xref ref-type="bibr" rid="ref42">42</xref>] validated 3 custom-designed games: 2 VR-based (Navigation and Hand-Eye Coordination) and 1 tablet-based (Memory) against the ACE-III (Addenbrooke&#x2019;s Cognitive Examination&#x2014;III) cognitive assessment tool in a cohort of young adults. 
The ACE-III is a comprehensive tool used to assess cognitive functioning across 5 domains: attention, memory, language, verbal fluency, and visuospatial skills. The study found no significant differences in overall task performance between the VR and 3D mobile conditions; however, the VR tasks elicited significantly higher motivation and engagement scores. These findings suggest that VR can enhance user experience and engagement without compromising the validity or reliability of cognitive assessment.</p><p>This enhanced motivational effect of VR-based tasks is further reinforced by Faria et al [<xref ref-type="bibr" rid="ref43">43</xref>], who explored VR in the context of cognitive rehabilitation. Similar findings are reported by Faria et al [<xref ref-type="bibr" rid="ref43">43</xref>], who evaluated 2 approaches to personalized cognitive rehabilitation&#x2014;1 VR-based and 1 traditional&#x2014;with patients with chronic stroke. Both methods were equally effective in improving cognitive performance, but the VR approach led to higher participant satisfaction and engagement due to its interactive features. This suggests that VR can deliver comparable therapeutic outcomes with added motivational benefits.</p><p>In addition to motivation, several studies emphasize the ecological validity of VR, a point illustrated well in the work by Tan et al [<xref ref-type="bibr" rid="ref44">44</xref>]. Tan et al [<xref ref-type="bibr" rid="ref44">44</xref>] assessed a VR-based cognitive screening tool targeting 6 cognitive domains. Their results showed high accuracy in detecting cognitive impairments, comparable with traditional methods, with the added benefit of enhanced ecological validity in VR. 
Participants were able to perform naturalistic tasks, such as navigation and object interaction, which are difficult to replicate in conventional computerized tests.</p><p>This growing interest in ecological validity and realism of cognitive assessments in VR is echoed in the meta-analysis by Negu&#x0163; et al [<xref ref-type="bibr" rid="ref45">45</xref>], which further consolidates the strengths and challenges of immersive assessments. Finally, Negu&#x0163; et al [<xref ref-type="bibr" rid="ref45">45</xref>] conducted a meta-analysis comparing VR-based assessments with classical paper-and-pencil or computerized measures. They reported that VR tasks often felt more challenging due to their immersive and dynamic nature, which can increase perceived cognitive load. However, the ecological validity and potential for more realistic stimuli in VR environments often make them more representative of real-world cognitive challenges.</p><p>In summary, past research shows that VR-based tasks enhance immersion, engagement, and ecological validity, making them a compelling alternative to traditional assessments. While RTs may vary depending on task design and context, this trade-off is often outweighed by the benefits of more naturalistic and interactive environments. These advantages position VR as a valuable complement to conventional methods, particularly in contexts that require high levels of interaction, naturalistic task performance, or strong participant motivation.</p><p>Building on this body of work, this study extends previous research by directly comparing attention and memory performance in immersive VR with desktop settings, both in-lab and remote, to assess the impact of presentation medium and testing environment on cognitive outcomes. 
Related to this question, several past studies using a variety of tasks have indicated that the quality of data collected on the web remains uncompromised [<xref ref-type="bibr" rid="ref46">46</xref>-<xref ref-type="bibr" rid="ref52">52</xref>]. However, web-based data frequently exhibit greater variability in contrast to data gathered in controlled laboratory environments [<xref ref-type="bibr" rid="ref51">51</xref>]. This variability could either stem from the diminished control over experimental conditions, as previously mentioned, or it could be an intrinsic characteristic of studying more diverse participant cohorts, thereby becoming a variable of interest in its own right [<xref ref-type="bibr" rid="ref53">53</xref>].</p><p>This issue of variability is particularly relevant in light of findings by Segen et al [<xref ref-type="bibr" rid="ref54">54</xref>], whose online versus lab-based comparison provides an important context for interpreting remote testing results. Notably, Segen et al [<xref ref-type="bibr" rid="ref54">54</xref>] found that the data collected in a conventional laboratory setting and those collected on the web produced very similar results, although the web-based data were more variable, with SEs being about 10% larger than those of the data collected in the lab. However, Segen et al [<xref ref-type="bibr" rid="ref54">54</xref>] have used a scene recognition task that might be less prone to the environmental distraction that is typically present when carrying out a task remotely compared with the attention and memory tasks we used here.</p></sec><sec id="s1-5"><title>Problem Statement and Objective</title><p>The heightened engagement resulting from the use of VR may come at a cost to experimental control. 
The dynamic and immersive nature of VR environments introduces additional variables that may influence participants&#x2019; cognitive processes, potentially altering the patterns of results obtained compared with their traditional counterparts. Therefore, it becomes imperative for scientists to systematically evaluate whether gamifying cognitive tasks in VR environments yields findings that are consistent with those obtained with traditional methods [<xref ref-type="bibr" rid="ref19">19</xref>]. Doing so would ensure that while enhancing engagement with gamification, researchers can still maintain the methodological rigor necessary for robust scientific conclusions. Therefore, the main objective of this study was to examine whether gamified versions of the Visual Search, the Whack-the-Mole, and the Corsi block tasks, each assessing a distinct cognitive process, produce patterns of results similar to those of their traditional versions. A second objective was to examine whether the way the gamified tasks are administered influences the pattern of results obtained. To this purpose, we compared results across 3 different conditions that involved the same gamified cognitive tasks: an immersive VR condition (VR-Lab), a desktop VR condition (Desktop-Lab; nonimmersive), and a desktop VR condition in which participants carried out the task at home (Desktop-Remote; nonimmersive). By conducting this comparison, we aimed to elucidate the influence of immersive VR on task outcomes, providing insights into the effectiveness of different task formats.</p></sec></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Participants</title><p>The experimental study included 75 healthy young adults (male=24, female=51) with an overall mean age of 23.15 years (SD 4.38 years) recruited from the student community of the University of Cyprus and the CYENS&#x2014;Centre of Excellence. 
Recruitment was conducted using a convenience sampling method through email announcements, web-based student forums and platforms, and direct invitations. All participants met the following inclusion criteria: (1) aged between 18 and 35 years, (2) normal or corrected-to-normal vision, and (3) no reported history of neurological, psychiatric, or cognitive impairments that could affect task performance. Eligibility was determined through a brief prescreening interview. Individuals with a self-reported history of neurological disorders (eg, epilepsy and traumatic brain injury), psychiatric conditions (eg, attention-deficit/hyperactivity disorder, anxiety, and depression), or uncorrected visual impairments were excluded from participation. Participants were randomly assigned to 1 of 3 between-subject conditions: VR-Lab, Desktop-Lab, or Desktop-Remote, with 25 participants in each group. No participants were excluded from the statistical analyses, and there were no dropouts. All laboratory-based sessions (VR-Lab and Desktop-Lab conditions) took place at the Experimental Psychology Laboratory at the University of Cyprus, a controlled environment designed to minimize external distractions.</p></sec><sec id="s2-2"><title>Materials</title><sec id="s2-2-1"><title>Visual Search Task</title><p>The Visual Search task used in this study involved the presentation of brown and gray axes (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>), with the target object changing from trial to trial. In feature search trials, the target differed from the distractors in terms of color, for example, a gray axe presented among brown axes. In contrast, in conjunction search trials, the target was defined by a combination of features that were also present in the distractors. For example, the target could be a brown 1-sided axe presented among gray 1-sided axes and brown double-sided axes (<xref ref-type="fig" rid="figure1">Figure 1</xref>). 
To examine the influence of display size, we varied the number of axes in both trial types to 5, 15, 25, or 40. Participants were instructed to search the display for the target and provide a response as soon as they detected it. In every trial, the targets were presented for 1.5 seconds, followed by the search display. Overall, the task consisted of 96 trials (50% feature search trials and 50% conjunction search trials), with 8 trials in each possible combination of search type and display size. Before the experimental trials, participants performed 8 practice trials, 1 in each combination of search type (Feature vs Conjunction Search) and display size (5, 15, 25, or 40) to familiarize themselves with the task and procedure. Trials were presented in a different random order for each participant. Participants in the VR-Lab condition searched for the target and selected it by directing a laser beam extended from the VR controller toward the target. The response was logged when the laser beam was on the target for 2 seconds; however, the RT was measured from the moment the beam first landed on the target. Participants in the Desktop-Lab and Desktop-Remote conditions responded by moving the mouse cursor to the target and clicking on it. In all 3 conditions (VR-Lab, Desktop-Lab, and Desktop-Remote), participants had the whole board with the axes in their field of view and thus did not need to turn their heads to search.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Example of a conjunction search trial in the Visual Search task of the experimental study. In this trial, participants searched for a brown 1-sided axe (target) among gray 1-sided axes and brown double-sided axes (distractors). This is a trial with a display size of 15. 
The red outline is added for illustration purposes and was not present in the actual display viewed by participants.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v9i1e65836_fig01.png"/></fig></sec><sec id="s2-2-2"><title>Whack-the-Mole Task</title><p>In our version of the Whack-the-Mole task that was modeled after Casey et al [<xref ref-type="bibr" rid="ref55">55</xref>], participants used a mallet to hit moles that popped up with random disguises from the hole in the center of the ground. In Go trials, a mole appeared with a varying disguise in each trial, and participants were instructed to hit it as fast as they could by either pulling the trigger on the handheld controller in the VR-Lab condition or clicking the mouse in the Desktop-Lab and Desktop-Remote conditions. Participants in the VR-Lab condition held a virtual mallet mapped to the controller in their dominant hand and hit the mole by pulling the trigger with their index finger. Similarly, participants in the Desktop-Lab and Desktop-Remote conditions clicked the mouse with the index finger of their dominant hand to hit the mole. In No-Go trials, mushrooms appeared instead of the mole, and participants were instructed to withhold a response (<xref ref-type="fig" rid="figure2">Figure 2</xref>). Before the experimental task, participants were shown the 7 disguises of the mole and the 3 types of mushrooms and carried out 10 practice trials. As shown in <xref ref-type="fig" rid="figure2">Figure 2</xref>, the task included 7 different mole disguises and 3 different types of mushrooms. Each participant completed 100 trials, with 75 being the Go trials and 25 being the No-Go trials. The 2 trial types were presented randomly within the task. 
In all 3 conditions (VR-Lab, Desktop-Lab, and Desktop-Remote), participants had a clear view of the ground area from which the mole emerged, without the need to turn their heads.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>An illustration of the Whack-the-Mole task as used in the study. On the left, the 3 different types of mushrooms (top row) and the 7 different mole disguises (middle and bottom rows) are shown. On the right, an example Go trial is shown. In the VR-Lab condition, participants had to pull the trigger on the handheld controller to respond. In the Desktop-Lab and Desktop-Remote conditions, they had to click the mouse. In No-Go trials, showing different types of mushrooms, participants had to refrain from any response.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v9i1e65836_fig02.png"/></fig></sec><sec id="s2-2-3"><title>Corsi Test</title><p>In this task, participants were presented with a 4&#x00D7;4 matrix of gray cubes (<xref ref-type="fig" rid="figure3">Figure 3</xref>). Each trial involved a unique sequence of cubes changing color from gray to yellow sequentially, and participants were required to replicate the sequence by tapping the blocks that changed color in the same order. Participants in the VR-Lab condition used the VR controllers to touch the blocks, whereas those in the Desktop-Lab and Desktop-Remote conditions clicked the blocks with the mouse. The task began with a single cube and the length of the sequence increased as participants responded correctly. Participants were given 3 attempts at each span length and advanced to the next length if they responded correctly in at least 2 attempts. The block span for each participant was measured as the length of the last sequence that was repeated correctly in at least 2 out of 3 attempts. 
After completing the tasks, participants in the Desktop-Remote condition were asked to click on a save button that sent their data to a cloud database. In all 3 conditions (VR-Lab, Desktop-Lab, and Desktop-Remote), the whole 4&#x00D7;4 matrix was within the participants&#x2019; field of view, so no head turning was required.</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>An illustration of the matrix in the Corsi Test as used in this study. Participants viewed a sequence of cubes turning yellow and then recreated it by either touching the cubes with the handheld controllers (VR-Lab condition) or clicking on each cube with the mouse (Desktop-Lab and Desktop-Remote participants).</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v9i1e65836_fig03.png"/></fig></sec><sec id="s2-2-4"><title>Procedure</title><p>Participants were initially contacted via email with details about the study and available time slots for participation. Those interested registered electronically by selecting a suitable time slot. Upon arrival at the lab, participants read a detailed description of the study and signed a consent form if they agreed to take part. No participants withdrew from the experimental procedure. Participants in the Desktop-Remote condition followed the same process, with the only difference being that they completed the study from home through a secure hyperlink and communicated with the experimenter (first author) via video call. They completed the tasks on their personal computers, and they were instructed to select a quiet, distraction-free space and to use a desktop or laptop computer with a stable internet connection. During the experiment, all participants were seated and instructed to use only their hand to operate either the mouse or the VR controller, ensuring that movement was limited primarily to the wrist and minor elbow motion. 
This protocol was applied consistently across all 3 tasks, regardless of condition. Before starting the experiment, participants in all conditions underwent a brief orientation session where they were familiarized with the tasks and response methods.</p><p>Participants in the VR-Lab condition carried out the tasks in an HTC Vive Pro HMD and used the Vive handheld controllers to respond. Participants in the Desktop-Lab condition carried out the task on the same desktop computer that was used for the VR-Lab condition but viewed the tasks on a 24" LCD display. The order of the 3 tasks was counterbalanced across conditions using a 3&#x00D7;3 Latin square design. For all tasks, data in the VR-Lab and Desktop-Lab conditions were automatically recorded by the computer running the experiment. In the 2 lab conditions, data were collected locally on the computer of the lab, while in the Desktop-Remote condition, data were encrypted and stored in a secure database.</p></sec></sec><sec id="s2-3"><title>Statistical Design</title><p>Separate mixed-design ANOVAs were conducted on mean RTs and d&#x2019;, depending on the cognitive task (see &#x201C;Results&#x201D; section for task-specific details). The within-subject variables for each task are detailed in the &#x201C;Results&#x201D; section, while the between-subject variable across all tasks was the mode of administration. In addition to the ANOVAs, Pearson correlations were computed between task scores to explore individual differences. All analyses were carried out using the Jamovi software package [<xref ref-type="bibr" rid="ref56">56</xref>].</p></sec><sec id="s2-4"><title>Ethical Considerations</title><p>The study conformed to European and national legislation and fundamental ethical principles, including those reflected in the Charter of Fundamental Rights of the European Union, the European Convention on Human Rights and its Supplementary Protocols, and the World Medical Association Declaration of Helsinki. 
The protocol of the study was given ethics clearance by the Cyprus National Bioethics Committee (approval number: &#x0395;&#x0395;&#x0392;&#x039A; &#x0395;&#x03A0; 2021.01.99). All participants read and signed informed consent forms either in person or digitally. Data were collected and maintained without any participant identification information.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Overview</title><p>To investigate whether the 3 gamified tasks replicated the pattern of results reported in the literature for their traditional counterparts and to compare performance across modes of administration, we computed inferential statistics separately for each task. We report the findings in the next subsections.</p></sec><sec id="s3-2"><title>Visual Search</title><p>For Visual Search, we carried out a repeated-measures ANOVA on RT with search type and display size as the within-subject variables and the administration condition as the between-subject variable.</p><p>Results revealed significant main effects for search type, <italic>F</italic><sub>1,72</sub>=787.12, <italic>P</italic>&#x003C;.001, &#x03B7;<sup>2</sup>=0.38; the display size, <italic>F</italic><sub>3,216</sub>=150.49, <italic>P</italic>&#x003C;.001, &#x03B7;<sup>2</sup>=0.17; and the administration condition, <italic>F</italic><sub>2,72</sub>=8.49, <italic>P</italic>=.001, &#x03B7;<sup>2</sup>=0.02. There was also a significant interaction between search type and display size, <italic>F</italic><sub>3,216</sub>=145.42, <italic>P</italic>&#x003C;.001, &#x03B7;<sup>2</sup>=0.16. As seen in <xref ref-type="fig" rid="figure4">Figure 4</xref>, the interaction was driven by the presence of a display size effect (ie, RT increasing from 5 to 15 and then to 25 and further to 40 objects) in conjunction search trials but not in feature search trials. 
The significant effect for the administration condition was due to shorter RT in the VR-Lab (mean 1.24 seconds) condition than in the Desktop-Lab (mean 1.49 seconds; <italic>P</italic>&#x003C;.001) and Desktop-Remote (mean 1.44 seconds; <italic>P</italic>=.008) conditions (<xref ref-type="fig" rid="figure5">Figure 5</xref>). The difference between the Desktop-Lab and Desktop-Remote conditions was not significant (<italic>P</italic>=1.00). Importantly, no significant interaction involving the administration condition was found. The effect size for search type (&#x03B7;<sup>2</sup>=0.38) is large, suggesting a robust difference in RTs between feature and conjunction searches, consistent with prior literature. The effect size for display size (&#x03B7;<sup>2</sup>=0.17) is also large, highlighting the substantial impact of increasing display size on RTs. In contrast, the main effect of the administration condition (&#x03B7;<sup>2</sup>=0.02) has a small effect size, indicating that the mode of task administration had a statistically significant but relatively small influence on RTs. Finally, the interaction effect between search type and display size was associated with a large effect size (&#x03B7;<sup>2</sup>=0.16), reinforcing the well-documented finding that increasing display size disproportionately affects conjunction searches compared with feature searches.</p><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>The average reaction time in the Visual Search task as a function of search type (feature search vs conjunction search) and display size (5, 15, 25, and 40 objects) for the VR-Lab, Desktop-Lab, and Desktop-Remote conditions. Results are shown in distinct panels for the VR-Lab, the Desktop-Lab, and the Desktop-Remote conditions. 
Error bars represent 95% confidence intervals.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v9i1e65836_fig04.png"/></fig><fig position="float" id="figure5"><label>Figure 5.</label><caption><p>Mean reaction times in the Visual Search task, aggregated across search type and display size, for each of the 3 administration conditions: VR-Lab, Desktop-Lab, and Desktop-Remote. Overall, reaction time was shorter in the VR-Lab condition than in the Desktop-Lab and Desktop-Remote conditions. Error bars represent 95% confidence intervals.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v9i1e65836_fig05.png"/></fig></sec><sec id="s3-3"><title>Whack-the-Mole</title><p>To analyze the accuracy data for the Whack-the-Mole task, we first computed for each participant the percentages of hits and false alarms. Based on the percentages, we computed the d&#x2019; score for each participant. The d&#x2019; score is a sensitive discrimination measure that reflects the degree to which participants accurately report the presence or absence of the target in the display. The d&#x2019; score was calculated using the formula: d&#x2019;=z (hit rate) &#x2212; z (false alarm rate).</p><p>The average d&#x2019; scores were 3.79 for the VR-Lab, 3.75 for the Desktop-Remote, and 3.62 for the Desktop-Lab condition (<xref ref-type="fig" rid="figure6">Figure 6</xref>), in line with the values typically reported in the literature [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref30">30</xref>]. A one-way ANOVA showed that the difference in d&#x2019; across the VR-Lab, Desktop-Lab, and Desktop-Remote conditions was not significant, <italic>F</italic><sub>2,72</sub>=0.71, <italic>P</italic>=.49, &#x03B7;<sup>2</sup>=0.02. 
In contrast, the analysis on mean RTs for hits only (ie, correct responses in Go trials) showed that participants were significantly slower in the Desktop-Remote condition (mean 0.64 second) than in the Desktop-Lab (mean 0.48 second) and VR-Lab (mean 0.41 second) conditions, both <italic>P</italic> values &#x003C;.001 (<xref ref-type="fig" rid="figure7">Figure 7</xref>). The difference in RTs between the VR-Lab and the Desktop-Lab conditions was marginally significant (<italic>P</italic>=.07).</p><fig position="float" id="figure6"><label>Figure 6.</label><caption><p>The average d' values in the Whack-the-Mole task across the 3 administration conditions. The d&#x2019; is computed, according to Signal Detection Theory, by taking into account the percentage of hits (ie, correct responses to targets) and false alarms (ie, incorrect responses to distractors). Error bars depict 95% confidence intervals.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v9i1e65836_fig06.png"/></fig><fig position="float" id="figure7"><label>Figure 7.</label><caption><p>Average reaction time for Hits (ie, correct responses to targets) in the Whack-the-Mole task across the 3 administration conditions. Error bars depict 95% confidence intervals.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v9i1e65836_fig07.png"/></fig></sec><sec id="s3-4"><title>Corsi Test</title><p>As shown in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>, the average span scores in the Corsi Test were 5.48 in the VR-Lab condition, 5.68 in the Desktop-Lab condition, and 5.24 in the Desktop-Remote condition. 
Despite the numeric difference across the 3 conditions, a one-way ANOVA showed that the effect of the administration condition on the Corsi span score was not significant, <italic>F</italic><sub>2,72</sub>=1.45, <italic>P</italic>=.24, &#x03B7;<sup>2</sup>=0.04. These findings indicate that the administration condition had no impact on spatial working memory performance, as measured by the Corsi Test.</p></sec><sec id="s3-5"><title>Individual Differences Within Attentional Tasks</title><p>To examine a possible relation between the 2 distinct attentional processes, that is, Visual Search and response inhibition, we carried out Pearson correlation analyses between the mean RT in feature search and conjunction search trials of the Visual Search task separately and the mean RT of Hits in the Whack-the-Mole task. For these, we averaged the RT across displays for each search type and we combined the data from the 3 administration conditions per task. We found a significant positive correlation between RT for Hits in the Whack-the-Mole task and RT in feature search trials of the Visual Search task, <italic>r</italic><sub>73</sub>=0.24; <italic>P</italic>=.04 (<xref ref-type="fig" rid="figure8">Figure 8</xref>). The correlation between RT for Hits in the Whack-the-Mole task and RT for the conjunction search trials of the Visual Search task was not significant, <italic>r</italic><sub>73</sub>=&#x2212;0.04; <italic>P</italic>=.74.</p><fig position="float" id="figure8"><label>Figure 8.</label><caption><p>Scatter plot depicting the correlation of Whack-the-Mole reaction time for Hits and Visual Search reaction time for Feature Search trials. 
The significant positive correlation indicates that participants who were faster to respond in Go trials of the Whack-the-Mole task tended to respond faster in the Feature Search trials of the Visual Search task.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v9i1e65836_fig08.png"/></fig></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Results</title><p>The main objective of this study was to investigate whether the gamified cognitive tasks we developed yield the same patterns of results as their traditional versions. A secondary aim was to determine whether the mode of administration (immersive VR vs desktop VR) and the task administered remotely or in the lab affect performance outcomes.</p><p>With regard to the main objective, we found that our gamified tasks replicated the traditional patterns observed in previous studies. Specifically, in the Visual Search task, RTs were shorter for feature search trials than for conjunction search trials, aligning with the established finding that feature searches are overall easier [<xref ref-type="bibr" rid="ref15">15</xref>]. Additionally, in conjunction search trials, RT increased significantly with display size, a finding known as the display size effect [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref17">17</xref>,<xref ref-type="bibr" rid="ref18">18</xref>]. Conversely, RTs remained relatively constant across display sizes in feature search trials, demonstrating the well-established pop-out effect [<xref ref-type="bibr" rid="ref15">15</xref>]. In the Whack-the-Mole task, the overall response inhibition performance aligns with past studies indicating a typical d&#x2019; score between 3 and 4, which reflects moderate sensitivity [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref49">49</xref>]. 
Our participants exhibited false alarms in about 10% of No-Go trials, consistent with the rate reported in the literature [<xref ref-type="bibr" rid="ref20">20</xref>-<xref ref-type="bibr" rid="ref22">22</xref>]. Furthermore, in assessing visual short-term memory, our findings showed that our participants had a span between 5 and 7, which is comparable with that reported by studies using the original version of the task [<xref ref-type="bibr" rid="ref35">35</xref>], as well as those using modern electronic implementations delivered via computers or mobile devices [<xref ref-type="bibr" rid="ref33">33</xref>].</p><p>While replicating the expected pattern of results across the 3 administration conditions, we also found some differences across these conditions. For example, performance in the Visual Search task was faster in the VR-Lab condition by 250 milliseconds than in the Desktop-Lab condition. One possibility is that this advantage for immersive VR is due to diminished distraction resulting from blocking out extraneous environmental information when wearing an HMD. However, it could also be due to methodological differences between the 2 administration conditions. Specifically, participants in the Desktop-Lab condition responded by moving and clicking the mouse on the target object. Thus, RT encompassed the time to locate the target, the time to move the mouse, and the time to click. In contrast, in the VR-Lab condition, participants pointed the VR controller to the target object without requiring them to press any button. Thus, RT in this condition involved the time to locate the target and the time to move the controller in 3D space. Perhaps moving the controller in 3D space is performed faster than moving a mouse cursor on a screen. Or perhaps, the difference can be attributed to the need to click the mouse in the desktop condition but not in the VR condition. 
Notably, we found no difference between the Desktop-Lab and Desktop-Remote conditions, suggesting that differences in the environmental settings (eg, varying screen sizes and distances from the screen at participant homes) and environmental distractors that may be greater at home than the laboratory do not interfere significantly with the assessment of Visual Search abilities.</p><p>In the Whack-the-Mole task, performance in the VR-Lab condition was marginally faster by 70 milliseconds than in the Desktop-Lab condition. In contrast to the Visual Search task, in the Whack-the-Mole task, the response requirements were rather similar across the 2 administration conditions. That is, in the VR-Lab condition, participants pulled the trigger of the controller to hit the mole, while in the desktop versions, they clicked the mouse. Thus, taking the results of the 2 tasks together, it seems more likely that in the Whack-the-Mole task, the advantage of immersive VR is due to blocking out access to distracting information from the external environment. Nevertheless, we cannot safely exclude the possibility that the difference is due to subtle differences in the response mode. Perhaps the time needed to pull the trigger on the VR controller is shorter than clicking the mouse button. However, it should be noted that the performance in the Desktop-Lab condition was faster by 160 milliseconds than in the Desktop-Remote condition, despite the fact that the response mode in these 2 conditions was identical. This finding suggests that the differences in overall performance across administration conditions in the Whack-the-Mole task are not methodological.</p><p>A possible explanation for the RT differences across the administration conditions of the Whack-the-Mole task is offered by the Perceptual Load Theory. 
This theory was proposed by Lavie and Tsal [<xref ref-type="bibr" rid="ref57">57</xref>,<xref ref-type="bibr" rid="ref58">58</xref>] in the mid-1990s as a potential resolution to the early versus late selection debate in attention and perception. Lavie and Tsal argued that when a task is perceptually demanding (referred to as high load), people dedicate all processing resources to the relevant stimuli early on in the information-processing stream. This results in no interference from distracting irrelevant stimuli, as no spare resources are available to process them. In contrast, when the task is not perceptually demanding (ie, a low-load task), spare resources are available after processing the relevant stimuli. These resources, according to the theory, automatically spill into the processing of irrelevant stimuli. Thus, in low-load tasks, selection takes place late with irrelevant stimuli exerting an influence on performance [<xref ref-type="bibr" rid="ref59">59</xref>-<xref ref-type="bibr" rid="ref62">62</xref>]. Notably, in our own research using empirical methods and computational modeling [<xref ref-type="bibr" rid="ref63">63</xref>], we have shown that inducing a narrow focus of attention may mediate whether distractor effects are present. That is, when the tasks force participants to zoom in their attention to stimuli on the screen, rather than having a more relaxed focus of attention, distracting information can exert no influence even if the task is of low load. Here, the Whack-the-Mole can be more easily classified as low load than high load. More importantly, it can be executed efficiently by maintaining a broader focus of attention, that is, by just focusing attention on a large part in the center of the screen rather than zooming in on a small stimulus. Carrying out the task this way allows for external distractors to slow down performance. 
If participants&#x2019; homes include more environmental distractors than a laboratory, this would translate to more extraneous influence on performance and slowing down of responses. This could also account for the advantage of the VR-Lab condition as donning an HMD serves to effectively block environmental distractors that could influence performance.</p><p>A question that arises, though, is why there was no difference between the Desktop-Lab and Desktop-Remote conditions in the Visual Search task. On one hand, the conjunction search trials of the Visual Search task can be thought of as a high-load task that requires a narrow focus of attention, limiting resources to the processing of the relevant stimuli. This would explain why there were no differences across administration conditions. On the other hand, this cannot apply to the feature search trials. Feature search trials are more likely a low-load task and, as such, would allow for the processing of distracting information. Yet, no differences were found across administration conditions. Therefore, it seems that further research, in which response modes and other methodological differences are controlled for, is needed to draw more definitive conclusions.</p></sec><sec id="s4-2"><title>Comparison With Prior Work</title><p>This study examined the impact of administration mode (VR-Lab, Desktop-Lab, and Desktop-Remote) on cognitive performance across 3 tasks: Visual Search, Whack-the-Mole, and Corsi Test. 
Overall, the results demonstrate that while VR tasks provide a highly immersive and engaging platform for cognitive assessment, the performance differences between VR and traditional desktop-based conditions are task-dependent and vary in magnitude.</p><p>Our results from the Visual Search task replicate the well-documented effects of search type and display size [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref19">19</xref>], with longer RT observed for conjunction searches than for feature searches and RT increasing with display size in conjunction search trials. Notably, participants in the VR-Lab condition exhibited significantly shorter RTs than those in the Desktop-Lab and Desktop-Remote conditions, suggesting that the immersive nature of VR may enhance visual attention and processing speed in this task. This finding aligns with previous research indicating that VR environments can improve cognitive task performance through increased engagement and spatial immersion [<xref ref-type="bibr" rid="ref41">41</xref>]. Barrett et al [<xref ref-type="bibr" rid="ref41">41</xref>] reported longer RT and higher fixation durations in VR compared with 2D environments, which they attributed to the added cognitive demands of immersion. Our results diverge from those of Barrett et al, as the VR-Lab condition here resulted in faster RT, potentially due to the optimized laboratory setup and controlled task parameters. Notably, these patterns of results were obtained with much fewer trials than typically included in the traditional versions of the tasks. 
For instance, while we ran our gamified Visual Search task with only 96 trials, we produced the same pattern of results as Huang and Pashler [<xref ref-type="bibr" rid="ref64">64</xref>] and Bravo and Nakayama [<xref ref-type="bibr" rid="ref65">65</xref>], who ran 1000 and 240 trials, respectively.</p><p>The Whack-the-Mole task revealed no significant differences in d&#x2019; scores across administration conditions, indicating that accuracy in response inhibition tasks is consistent across VR and desktop modalities. These findings are consistent with those by Bhargava et al [<xref ref-type="bibr" rid="ref42">42</xref>], who found that VR tasks achieved comparable accuracy to traditional assessments while enhancing engagement. However, RTs for Hits were significantly slower in the Desktop-Remote condition than in both the VR-Lab and Desktop-Lab conditions. This delay may reflect a lack of environmental standardization in remote settings, which has been noted as a limitation of remote cognitive testing [<xref ref-type="bibr" rid="ref45">45</xref>]. The marginally faster RT in the VR-Lab condition than in the Desktop-Lab condition (<italic>P</italic>=.07) further highlights the potential for VR to enhance cognitive-motor coordination, as observed in other VR-based cognitive tasks [<xref ref-type="bibr" rid="ref43">43</xref>]. In addition, while Shimi et al [<xref ref-type="bibr" rid="ref20">20</xref>] and Casey et al [<xref ref-type="bibr" rid="ref55">55</xref>] had 220 trials and 300 trials, respectively, in the Whack-the-Mole task they administered, we obtained the same results with only 100 trials.</p><p>The Corsi Test revealed no significant differences in average span scores across conditions, supporting the conclusion that spatial working memory is not influenced by the mode of administration. 
This aligns with prior findings that traditional and VR-based versions of the Corsi Test produce comparable results [<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref33">33</xref>]. While the observed numeric differences were small and nonsignificant, the slightly lower scores in the Desktop-Remote condition may reflect environmental distractions or variability in participant equipment, which can affect remote testing reliability [<xref ref-type="bibr" rid="ref45">45</xref>].</p><p>The positive correlation between RT for Hits in the Whack-the-Mole task and feature search trials in the Visual Search task suggests an underlying link between response inhibition and attentional processes, particularly in tasks involving rapid response execution. However, the absence of a significant correlation between RT in conjunction search trials and response inhibition suggests that these processes may operate independently under higher cognitive demands. These findings add to the growing literature on individual differences in attentional control [<xref ref-type="bibr" rid="ref44">44</xref>] and further validate the use of gamified tasks in exploring these relationships.</p><p>Our findings emphasize the importance of task design and administration context in cognitive testing. While VR can enhance engagement and, in some cases, improve task performance, the added cognitive load of immersive environments may not universally benefit all cognitive processes [<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref45">45</xref>]. The only minor differences in accuracy and spatial memory performance across administration modes highlight the robustness of these tasks to different delivery methods, including remote testing. 
However, slower RT in remote conditions underscores the need for careful standardization and participant instructions when deploying tasks outside laboratory settings.</p><p>Future studies should explore the impact of task complexity and individual differences on performance across VR and traditional modalities. Additionally, longitudinal research is needed to assess whether the observed benefits of VR in certain cognitive domains translate into practical advantages in real-world applications, such as rehabilitation and training.</p></sec><sec id="s4-3"><title>Limitations</title><p>In this study, we did not assess participants&#x2019; prior experience with VR. Previous exposure to VR could have influenced RTs, with participants who have used VR systems before potentially showing faster RTs due to greater familiarity with the technology. This factor should be considered in future studies, and including a questionnaire to assess past VR experience would be a valuable addition to control for this variable and better understand its impact on performance.</p><p>While this study focuses on the potential engagement benefits of VR-based cognitive tasks, it is important to acknowledge that VR can also introduce challenges. One key limitation is the potential for cybersickness, eye strain, and general fatigue associated with prolonged VR use. These factors can negatively impact participant performance [<xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref67">67</xref>] and may have contributed to variability in the results, particularly among participants who were less familiar with VR technology. Although we did not directly assess these issues by asking participants, no participants in the VR group reported any difficulties or discomfort with VR during or after their participation. 
While this suggests that cybersickness or fatigue was not a major confound, future studies could benefit from incorporating standardized self-report questionnaires or physiological measures to better isolate the impact of VR engagement on cognitive performance.</p><p>Notably, we did not include a paper-and-pencil comparison group, as our primary aim was to contrast the gamified VR tasks with traditional desktop-based digital assessments. Including a paper-and-pencil comparison could provide insights into cross-modality generalizability, but it would also introduce additional variables unrelated to our core research objective, such as differences in response modalities and environmental influences. Nevertheless, we acknowledge that such a comparison could be valuable in future research to examine broader ecological validity, recognizing that certain tasks in our study (eg, Whack-the-Mole) are not feasible in a paper-based format.</p></sec><sec id="s4-4"><title>Conclusions</title><p>This study demonstrated that gamified VR tasks can replicate well-established cognitive performance patterns using fewer trials, highlighting their potential as efficient and engaging tools for assessing attention, response inhibition, and visual short-term memory. By comparing performance across immersive VR, desktop-based, and remote administration conditions, we found that VR-based tasks maintained ecological validity and produced results consistent with traditional methods, suggesting that increased immersion does not compromise data quality. These advantages are particularly relevant for reducing participant fatigue and enhancing motivation, factors essential for high-quality cognitive assessment.</p><p>At the same time, VR introduces methodological variables that require further attention. In this study, we did not directly assess cybersickness, fatigue, or prior VR experience, which may influence individual performance. 
In addition, comparisons to paper-and-pencil tasks were not included, as the tasks examined here are typically administered in computerized form. Future research should explore these aspects systematically and investigate how task-specific features and environmental settings interact with immersion to affect cognitive outcomes. Overall, our findings support the use of immersive VR as a viable and scalable platform for cognitive assessment, especially in applications that benefit from high engagement and ecological realism.</p></sec></sec></body><back><ack><p>This project has received partial funding from the European Union&#x2019;s Horizon 2020 Research and Innovation Programme (grant agreement number 739578) and the Government of the Republic of Cyprus through the Deputy Ministry of Research, Innovation, and Digital Policy.</p></ack><notes><sec><title>Data Availability</title><p>The aggregated data file on which all analyses were carried out is freely available on the Open Science Framework at <ext-link ext-link-type="uri" xlink:href="https://osf.io/e9ry2">https://osf.io/e9ry2</ext-link></p></sec></notes><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">ACE-III</term><def><p>Addenbrooke&#x2019;s Cognitive Examination&#x2014;III</p></def></def-item><def-item><term id="abb2">HMD</term><def><p>head-mounted display</p></def></def-item><def-item><term id="abb3">RT</term><def><p>reaction time</p></def></def-item><def-item><term id="abb4">VR</term><def><p>virtual reality</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Smith</surname><given-names>EE</given-names> </name><name name-style="western"><surname>Jonides</surname><given-names>J</given-names> </name></person-group><article-title>Storage and executive 
processes in the frontal lobes</article-title><source>Science</source><year>1999</year><month>03</month><day>12</day><volume>283</volume><issue>5408</issue><fpage>1657</fpage><lpage>1661</lpage><pub-id pub-id-type="doi">10.1126/science.283.5408.1657</pub-id><pub-id pub-id-type="medline">10073923</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Miller</surname><given-names>EK</given-names> </name><name name-style="western"><surname>Cohen</surname><given-names>JD</given-names> </name></person-group><article-title>An integrative theory of prefrontal cortex function</article-title><source>Annu Rev Neurosci</source><year>2001</year><volume>24</volume><issue>1</issue><fpage>167</fpage><lpage>202</lpage><pub-id pub-id-type="doi">10.1146/annurev.neuro.24.1.167</pub-id><pub-id pub-id-type="medline">11283309</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Eriksen</surname><given-names>BA</given-names> </name><name name-style="western"><surname>Eriksen</surname><given-names>CW</given-names> </name></person-group><article-title>Effects of noise letters upon the identification of a target letter in a nonsearch task</article-title><source>Percept Psychophys</source><year>1974</year><month>01</month><volume>16</volume><issue>1</issue><fpage>143</fpage><lpage>149</lpage><pub-id pub-id-type="doi">10.3758/BF03203267</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Posner</surname><given-names>MI</given-names> </name><name name-style="western"><surname>Petersen</surname><given-names>SE</given-names> </name></person-group><article-title>The attention system of the human brain</article-title><source>Annu Rev 
Neurosci</source><year>1990</year><volume>13</volume><fpage>25</fpage><lpage>42</lpage><pub-id pub-id-type="doi">10.1146/annurev.ne.13.030190.000325</pub-id><pub-id pub-id-type="medline">2183676</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Mekler</surname><given-names>ED</given-names> </name><name name-style="western"><surname>Br&#x00FC;hlmann</surname><given-names>F</given-names> </name><name name-style="western"><surname>Opwis</surname><given-names>K</given-names> </name><name name-style="western"><surname>Tuch</surname><given-names>AN</given-names> </name></person-group><article-title>Disassembling gamification: the effects of points and meaning on user motivation and performance</article-title><conf-name>CHI &#x2019;13: CHI Conference on Human Factors in Computing Systems</conf-name><conf-date>Apr 27 to May 2, 2013</conf-date><conf-loc>Paris, France</conf-loc></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lang</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bradley</surname><given-names>SD</given-names> </name><name name-style="western"><surname>Park</surname><given-names>B</given-names> </name><name name-style="western"><surname>Shin</surname><given-names>M</given-names> </name><name name-style="western"><surname>Chung</surname><given-names>Y</given-names> </name></person-group><article-title>Parsing the resource pie: using STRTs to measure attention to mediated messages</article-title><source>Media Psychol</source><year>2006</year><month>11</month><volume>8</volume><issue>4</issue><fpage>369</fpage><lpage>394</lpage><pub-id pub-id-type="doi">10.1207/s1532785xmep0804_3</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Warm</surname><given-names>JS</given-names> </name><name name-style="western"><surname>Parasuraman</surname><given-names>R</given-names> </name><name name-style="western"><surname>Matthews</surname><given-names>G</given-names> </name></person-group><article-title>Vigilance requires hard mental work and is stressful</article-title><source>Hum Factors</source><year>2008</year><month>06</month><volume>50</volume><issue>3</issue><fpage>433</fpage><lpage>441</lpage><pub-id pub-id-type="doi">10.1518/001872008X312152</pub-id><pub-id pub-id-type="medline">18689050</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Parsons</surname><given-names>TD</given-names> </name></person-group><article-title>Virtual reality for enhanced ecological validity and experimental control in the clinical, affective and social neurosciences</article-title><source>Front Hum Neurosci</source><year>2015</year><volume>9</volume><fpage>660</fpage><pub-id pub-id-type="doi">10.3389/fnhum.2015.00660</pub-id><pub-id pub-id-type="medline">26696869</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Slater</surname><given-names>M</given-names> </name><name name-style="western"><surname>Sanchez-Vives</surname><given-names>MV</given-names> </name></person-group><article-title>Enhancing Our Lives with Immersive Virtual Reality</article-title><source>Front Robot AI</source><year>2016</year><month>12</month><day>19</day><volume>3</volume><fpage>74</fpage><pub-id pub-id-type="doi">10.3389/frobt.2016.00074</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name 
name-style="western"><surname>Deterding</surname><given-names>S</given-names> </name><name name-style="western"><surname>Dixon</surname><given-names>D</given-names> </name><name name-style="western"><surname>Khaled</surname><given-names>R</given-names> </name><name name-style="western"><surname>Nacke</surname><given-names>L</given-names> </name></person-group><article-title>From game design elements to gamefulness: defining &#x201C;gamification&#x201D;</article-title><conf-name>Proceedings of the 15th International Academic MindTrek Conference: Envisioning Future Media Environments</conf-name><conf-date>Sep 28-30, 2011</conf-date><conf-loc>Tampere, Finland</conf-loc></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Hamari</surname><given-names>J</given-names> </name><name name-style="western"><surname>Koivisto</surname><given-names>J</given-names> </name><name name-style="western"><surname>Sarsa</surname><given-names>H</given-names> </name></person-group><article-title>Does gamification work?&#x2014;A literature review of empirical studies on gamification</article-title><conf-name>2014 47th Hawaii International Conference on System Sciences</conf-name><conf-date>Jan 6-9, 2014</conf-date><conf-loc>Waikoloa, HI, USA</conf-loc></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lee</surname><given-names>CC</given-names> </name><name name-style="western"><surname>Hsiao</surname><given-names>KL</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>CC</given-names> </name></person-group><article-title>Exploring the benefit and sacrifice factors of virtual reality gameplay</article-title><source>Front Psychol</source><year>2020</year><volume>11</volume><fpage>251</fpage><pub-id 
pub-id-type="doi">10.3389/fpsyg.2020.00251</pub-id><pub-id pub-id-type="medline">32210870</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Huang</surname><given-names>W</given-names> </name><name name-style="western"><surname>Roscoe</surname><given-names>RD</given-names> </name><name name-style="western"><surname>Johnson&#x2010;Glenberg</surname><given-names>MC</given-names> </name><name name-style="western"><surname>Craig</surname><given-names>SD</given-names> </name></person-group><article-title>Motivation, engagement, and performance across multiple virtual reality sessions and levels of immersion</article-title><source>J Comput Assist Learn</source><year>2021</year><month>06</month><volume>37</volume><issue>3</issue><fpage>745</fpage><lpage>758</lpage><pub-id pub-id-type="doi">10.1111/jcal.12520</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mittelstaedt</surname><given-names>JM</given-names> </name><name name-style="western"><surname>Wacker</surname><given-names>J</given-names> </name><name name-style="western"><surname>Stelling</surname><given-names>D</given-names> </name></person-group><article-title>VR aftereffect and the relation of cybersickness and cognitive performance</article-title><source>Virtual Real</source><year>2019</year><month>06</month><volume>23</volume><issue>2</issue><fpage>143</fpage><lpage>154</lpage><pub-id pub-id-type="doi">10.1007/s10055-018-0370-3</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wolfe</surname><given-names>JM</given-names> </name></person-group><article-title>Major issues in the study of visual search: part 2 of &#x201C;40 years of feature 
integration: special issue in memory of Anne Treisman&#x201D;</article-title><source>Atten Percept Psychophys</source><year>2020</year><month>02</month><volume>82</volume><issue>2</issue><fpage>383</fpage><lpage>393</lpage><pub-id pub-id-type="doi">10.3758/s13414-020-02022-1</pub-id><pub-id pub-id-type="medline">32291612</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Morar</surname><given-names>SS</given-names> </name><name name-style="western"><surname>Macredie</surname><given-names>RD</given-names> </name><name name-style="western"><surname>Cribbin</surname><given-names>T</given-names> </name></person-group><article-title>An investigation of visual cues used to create and support frames of reference and visual search tasks in desktop virtual environments</article-title><source>Virtual Real</source><year>2002</year><month>10</month><day>1</day><volume>6</volume><issue>3</issue><fpage>140</fpage><lpage>150</lpage><pub-id pub-id-type="doi">10.1007/s100550200015</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Smilek</surname><given-names>D</given-names> </name><name name-style="western"><surname>Enns</surname><given-names>JT</given-names> </name><name name-style="western"><surname>Eastwood</surname><given-names>JD</given-names> </name><name name-style="western"><surname>Merikle</surname><given-names>PM</given-names> </name></person-group><article-title>Relax! 
Cognitive strategy influences visual search</article-title><source>Vis Cogn</source><year>2006</year><month>08</month><volume>14</volume><issue>4-8</issue><fpage>543</fpage><lpage>564</lpage><pub-id pub-id-type="doi">10.1080/13506280500193487</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mueller</surname><given-names>ST</given-names> </name></person-group><article-title>A partial implementation of the BICA Cognitive Decathlon using the Psychology Experiment Building Language (PEBL)</article-title><source>Int J Mach Conscious</source><year>2010</year><month>12</month><volume>02</volume><issue>2</issue><fpage>273</fpage><lpage>288</lpage><pub-id pub-id-type="doi">10.1142/S1793843010000497</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lumsden</surname><given-names>J</given-names> </name><name name-style="western"><surname>Edwards</surname><given-names>EA</given-names> </name><name name-style="western"><surname>Lawrence</surname><given-names>NS</given-names> </name><name name-style="western"><surname>Coyle</surname><given-names>D</given-names> </name><name name-style="western"><surname>Munaf&#x00F2;</surname><given-names>MR</given-names> </name></person-group><article-title>Gamification of cognitive assessment and cognitive training: a systematic review of applications and efficacy</article-title><source>JMIR Serious Games</source><year>2016</year><month>07</month><day>15</day><volume>4</volume><issue>2</issue><fpage>e11</fpage><pub-id pub-id-type="doi">10.2196/games.5888</pub-id><pub-id pub-id-type="medline">27421244</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Shimi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Tsestou</surname><given-names>V</given-names> </name><name name-style="western"><surname>Hadjiaros</surname><given-names>M</given-names> </name><name name-style="western"><surname>Neokleous</surname><given-names>K</given-names> </name><name name-style="western"><surname>Avraamides</surname><given-names>M</given-names> </name></person-group><article-title>Attentional skills in soccer: evaluating the involvement of attention in executing a goalkeeping task in virtual reality</article-title><source>Appl Sci (Basel)</source><year>2021</year><month>01</month><volume>11</volume><issue>19</issue><fpage>9341</fpage><pub-id pub-id-type="doi">10.3390/app11199341</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cragg</surname><given-names>L</given-names> </name><name name-style="western"><surname>Nation</surname><given-names>K</given-names> </name></person-group><article-title>Go or no-go? 
Developmental improvements in the efficiency of response inhibition in mid-childhood</article-title><source>Dev Sci</source><year>2008</year><month>11</month><volume>11</volume><issue>6</issue><fpage>819</fpage><lpage>827</lpage><pub-id pub-id-type="doi">10.1111/j.1467-7687.2008.00730.x</pub-id><pub-id pub-id-type="medline">19046150</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nigg</surname><given-names>J</given-names> </name><name name-style="western"><surname>Nikolas</surname><given-names>M</given-names> </name><name name-style="western"><surname>Friderici</surname><given-names>K</given-names> </name><name name-style="western"><surname>Park</surname><given-names>L</given-names> </name><name name-style="western"><surname>Zucker</surname><given-names>RA</given-names> </name></person-group><article-title>Genotype and neuropsychological response inhibition as resilience promoters for attention-deficit/hyperactivity disorder, oppositional defiant disorder, and conduct disorder under conditions of psychosocial adversity</article-title><source>Dev Psychopathol</source><year>2007</year><volume>19</volume><issue>3</issue><fpage>767</fpage><lpage>786</lpage><pub-id pub-id-type="doi">10.1017/S0954579407000387</pub-id><pub-id pub-id-type="medline">17705902</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Allan</surname><given-names>NP</given-names> </name><name name-style="western"><surname>Hume</surname><given-names>LE</given-names> </name><name name-style="western"><surname>Allan</surname><given-names>DM</given-names> </name><name name-style="western"><surname>Farrington</surname><given-names>AL</given-names> </name><name name-style="western"><surname>Lonigan</surname><given-names>CJ</given-names> 
</name></person-group><article-title>Relations between inhibitory control and the development of academic skills in preschool and kindergarten: a meta-analysis</article-title><source>Dev Psychol</source><year>2014</year><month>10</month><volume>50</volume><issue>10</issue><fpage>2368</fpage><lpage>2379</lpage><pub-id pub-id-type="doi">10.1037/a0037493</pub-id><pub-id pub-id-type="medline">25069051</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Schmitt</surname><given-names>LM</given-names> </name><name name-style="western"><surname>White</surname><given-names>SP</given-names> </name><name name-style="western"><surname>Cook</surname><given-names>EH</given-names> </name><name name-style="western"><surname>Sweeney</surname><given-names>JA</given-names> </name><name name-style="western"><surname>Mosconi</surname><given-names>MW</given-names> </name></person-group><article-title>Cognitive mechanisms of inhibitory control deficits in autism spectrum disorder</article-title><source>J Child Psychol Psychiatry</source><year>2018</year><month>05</month><volume>59</volume><issue>5</issue><fpage>586</fpage><lpage>595</lpage><pub-id pub-id-type="doi">10.1111/jcpp.12837</pub-id><pub-id pub-id-type="medline">29052841</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Aron</surname><given-names>AR</given-names> </name><name name-style="western"><surname>Poldrack</surname><given-names>RA</given-names> </name></person-group><article-title>The cognitive neuroscience of response inhibition: relevance for genetic research in attention-deficit/hyperactivity disorder</article-title><source>Biol 
Psychiatry</source><year>2005</year><month>06</month><day>1</day><volume>57</volume><issue>11</issue><fpage>1285</fpage><lpage>1292</lpage><pub-id pub-id-type="doi">10.1016/j.biopsych.2004.10.026</pub-id><pub-id pub-id-type="medline">15950000</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>van Hulst</surname><given-names>BM</given-names> </name><name name-style="western"><surname>de Zeeuw</surname><given-names>P</given-names> </name><name name-style="western"><surname>Vlaskamp</surname><given-names>C</given-names> </name><name name-style="western"><surname>Rijks</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Zandbelt</surname><given-names>BB</given-names> </name><name name-style="western"><surname>Durston</surname><given-names>S</given-names> </name></person-group><article-title>Children with ADHD symptoms show deficits in reactive but not proactive inhibition, irrespective of their formal diagnosis</article-title><source>Psychol Med</source><year>2018</year><month>11</month><volume>48</volume><issue>15</issue><fpage>2515</fpage><lpage>2521</lpage><pub-id pub-id-type="doi">10.1017/S0033291718000107</pub-id><pub-id pub-id-type="medline">29415788</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Barker</surname><given-names>V</given-names> </name><name name-style="western"><surname>Romaniuk</surname><given-names>L</given-names> </name><name name-style="western"><surname>Cardinal</surname><given-names>RN</given-names> </name><name name-style="western"><surname>Pope</surname><given-names>M</given-names> </name><name name-style="western"><surname>Nicol</surname><given-names>K</given-names> </name><name name-style="western"><surname>Hall</surname><given-names>J</given-names> 
</name></person-group><article-title>Impulsivity in borderline personality disorder</article-title><source>Psychol Med</source><year>2015</year><month>07</month><volume>45</volume><issue>9</issue><fpage>1955</fpage><lpage>1964</lpage><pub-id pub-id-type="doi">10.1017/S0033291714003079</pub-id><pub-id pub-id-type="medline">25600066</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Evans</surname><given-names>AH</given-names> </name><name name-style="western"><surname>Strafella</surname><given-names>AP</given-names> </name><name name-style="western"><surname>Weintraub</surname><given-names>D</given-names> </name><name name-style="western"><surname>Stacy</surname><given-names>M</given-names> </name></person-group><article-title>Impulsive and compulsive behaviors in Parkinson&#x2019;s disease</article-title><source>Mov Disord</source><year>2009</year><month>08</month><day>15</day><volume>24</volume><issue>11</issue><fpage>1561</fpage><lpage>1570</lpage><pub-id pub-id-type="doi">10.1002/mds.22505</pub-id><pub-id pub-id-type="medline">19526584</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hirose</surname><given-names>S</given-names> </name><name name-style="western"><surname>Chikazoe</surname><given-names>J</given-names> </name><name name-style="western"><surname>Watanabe</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Efficiency of go/no-go task performance implemented in the left hemisphere</article-title><source>J Neurosci</source><year>2012</year><month>06</month><day>27</day><volume>32</volume><issue>26</issue><fpage>9059</fpage><lpage>9065</lpage><pub-id pub-id-type="doi">10.1523/JNEUROSCI.0540-12.2012</pub-id><pub-id 
pub-id-type="medline">22745505</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Guti&#x00E9;rrez-Cobo</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Cabello</surname><given-names>R</given-names> </name><name name-style="western"><surname>Fern&#x00E1;ndez-Berrocal</surname><given-names>P</given-names> </name></person-group><article-title>The three models of emotional intelligence and performance in a hot and cool go/no-go task in undergraduate students</article-title><source>Front Behav Neurosci</source><year>2017</year><volume>11</volume><fpage>33</fpage><pub-id pub-id-type="doi">10.3389/fnbeh.2017.00033</pub-id><pub-id pub-id-type="medline">28275343</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="thesis"><person-group person-group-type="author"><name name-style="western"><surname>Corsi</surname><given-names>PM</given-names> </name></person-group><article-title>Human Memory and the Medial Temporal Region of the Brain</article-title><source>PhD Thesis</source><year>1973</year><publisher-name>ProQuest Information &#x0026; Learning</publisher-name></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Claessen</surname><given-names>MHG</given-names> </name><name name-style="western"><surname>van der Ham</surname><given-names>IJM</given-names> </name><name name-style="western"><surname>van Zandvoort</surname><given-names>MJE</given-names> </name></person-group><article-title>Computerization of the standard Corsi block-tapping task affects its underlying cognitive concepts: a pilot study</article-title><source>Appl Neuropsychol Adult</source><year>2015</year><volume>22</volume><issue>3</issue><fpage>180</fpage><lpage>188</lpage><pub-id 
pub-id-type="doi">10.1080/23279095.2014.892488</pub-id><pub-id pub-id-type="medline">25258029</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Siddi</surname><given-names>S</given-names> </name><name name-style="western"><surname>Preti</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lara</surname><given-names>E</given-names> </name><etal/></person-group><article-title>Comparison of the touch-screen and traditional versions of the Corsi block-tapping test in patients with psychosis and healthy controls</article-title><source>BMC Psychiatry</source><year>2020</year><month>06</month><day>23</day><volume>20</volume><issue>1</issue><fpage>329</fpage><pub-id pub-id-type="doi">10.1186/s12888-020-02716-8</pub-id><pub-id pub-id-type="medline">32576254</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Hadjiaros</surname><given-names>M</given-names> </name><name name-style="western"><surname>Neokleous</surname><given-names>K</given-names> </name><name name-style="western"><surname>Schiza</surname><given-names>E</given-names> </name><name name-style="western"><surname>Matsangidou</surname><given-names>M</given-names> </name><name name-style="western"><surname>Avraamides</surname><given-names>MN</given-names> </name><name name-style="western"><surname>Pattichis</surname><given-names>CS</given-names> </name></person-group><article-title>A game-based cognitive assessment for visuospatial tasks: evaluation in healthy adults</article-title><conf-name>2021 IEEE 21st International Conference on Bioinformatics and Bioengineering (BIBE)</conf-name><conf-date>Oct 25-27, 2021</conf-date><conf-loc>Kragujevac, Serbia</conf-loc><fpage>1</fpage><lpage>5</lpage><pub-id 
pub-id-type="doi">10.1109/BIBE52308.2021.9635507</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kessels</surname><given-names>RP</given-names> </name><name name-style="western"><surname>van Zandvoort</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Postma</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kappelle</surname><given-names>LJ</given-names> </name><name name-style="western"><surname>de Haan</surname><given-names>EH</given-names> </name></person-group><article-title>The Corsi Block-Tapping Task: standardization and normative data</article-title><source>Appl Neuropsychol</source><year>2000</year><volume>7</volume><issue>4</issue><fpage>252</fpage><lpage>258</lpage><pub-id pub-id-type="doi">10.1207/S15324826AN0704_8</pub-id><pub-id pub-id-type="medline">11296689</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dahlstrom-Hakki</surname><given-names>I</given-names> </name><name name-style="western"><surname>Alstad</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Asbell-Clarke</surname><given-names>J</given-names> </name><name name-style="western"><surname>Edwards</surname><given-names>T</given-names> </name></person-group><article-title>The impact of visual and auditory distractions on the performance of neurodiverse students in virtual reality (VR) environments</article-title><source>Virtual Real</source><year>2024</year><month>03</month><volume>28</volume><issue>1</issue><pub-id pub-id-type="doi">10.1007/s10055-023-00933-6</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Hung</surname><given-names>SC</given-names> </name><name name-style="western"><surname>Ho</surname><given-names>AY</given-names> </name><name name-style="western"><surname>Lai</surname><given-names>IH</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>CS</given-names> </name><name name-style="western"><surname>Pong</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Lai</surname><given-names>FH</given-names> </name></person-group><article-title>Meta-analysis on the effectiveness of virtual reality cognitive training (VRCT) and computer-based cognitive training (CBCT) for individuals with mild cognitive impairment (MCI)</article-title><source>Electronics (Basel)</source><year>2020</year><month>12</month><day>18</day><volume>9</volume><issue>12</issue><fpage>2185</fpage><pub-id pub-id-type="doi">10.3390/electronics9122185</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Slater</surname><given-names>M</given-names> </name><name name-style="western"><surname>Wilbur</surname><given-names>S</given-names> </name></person-group><article-title>A framework for immersive virtual environments (FIVE): speculations on the role of presence in virtual environments</article-title><source>Presence Teleoperators Virtual Environ</source><year>1997</year><month>12</month><volume>6</volume><issue>6</issue><fpage>603</fpage><lpage>616</lpage><pub-id pub-id-type="doi">10.1162/pres.1997.6.6.603</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bailenson</surname><given-names>JN</given-names> </name><name name-style="western"><surname>Beall</surname><given-names>AC</given-names> </name><name 
name-style="western"><surname>Loomis</surname><given-names>J</given-names> </name><name name-style="western"><surname>Blascovich</surname><given-names>J</given-names> </name><name name-style="western"><surname>Turk</surname><given-names>M</given-names> </name></person-group><article-title>Transformed social interaction: decoupling representation from behavior and form in collaborative virtual environments</article-title><source>Presence Teleoperators Virtual Environ</source><year>2004</year><month>08</month><day>1</day><volume>13</volume><issue>4</issue><fpage>428</fpage><lpage>441</lpage><pub-id pub-id-type="doi">10.1162/1054746041944803</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bailenson</surname><given-names>J</given-names> </name><name name-style="western"><surname>Patel</surname><given-names>K</given-names> </name><name name-style="western"><surname>Nielsen</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bajscy</surname><given-names>R</given-names> </name><name name-style="western"><surname>Jung</surname><given-names>SH</given-names> </name><name name-style="western"><surname>Kurillo</surname><given-names>G</given-names> </name></person-group><article-title>The effect of interactivity on learning physical actions in virtual reality</article-title><source>Media Psychol</source><year>2008</year><month>09</month><day>17</day><volume>11</volume><issue>3</issue><fpage>354</fpage><lpage>376</lpage><pub-id pub-id-type="doi">10.1080/15213260802285214</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Barrett</surname><given-names>RCA</given-names> </name><name name-style="western"><surname>Poe</surname><given-names>R</given-names> </name><name 
name-style="western"><surname>O&#x2019;Camb</surname><given-names>JW</given-names> </name><etal/></person-group><article-title>Comparing virtual reality, desktop-based 3D, and 2D versions of a category learning experiment</article-title><source>PLoS One</source><year>2022</year><volume>17</volume><issue>10</issue><fpage>e0275119</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0275119</pub-id><pub-id pub-id-type="medline">36201546</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bhargava</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Kottapalli</surname><given-names>A</given-names> </name><name name-style="western"><surname>Baths</surname><given-names>V</given-names> </name></person-group><article-title>Validation and comparison of virtual reality and 3D mobile games for cognitive assessment against ACE-III in 82 young participants</article-title><source>Sci Rep</source><year>2024</year><month>10</month><day>13</day><volume>14</volume><issue>1</issue><fpage>23918</fpage><pub-id pub-id-type="doi">10.1038/s41598-024-75065-1</pub-id><pub-id pub-id-type="medline">39397120</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Faria</surname><given-names>AL</given-names> </name><name name-style="western"><surname>Pinho</surname><given-names>MS</given-names> </name><name name-style="western"><surname>Berm&#x00FA;dez I Badia</surname><given-names>S</given-names> </name></person-group><article-title>A comparison of two personalization and adaptive cognitive rehabilitation approaches: a randomized controlled trial with chronic stroke patients</article-title><source>J Neuroeng 
Rehabil</source><year>2020</year><month>06</month><day>16</day><volume>17</volume><issue>1</issue><fpage>78</fpage><pub-id pub-id-type="doi">10.1186/s12984-020-00691-5</pub-id><pub-id pub-id-type="medline">32546251</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tan</surname><given-names>NC</given-names> </name><name name-style="western"><surname>Lim</surname><given-names>JE</given-names> </name><name name-style="western"><surname>Sultana</surname><given-names>R</given-names> </name><name name-style="western"><surname>Quah</surname><given-names>JHM</given-names> </name><name name-style="western"><surname>Wong</surname><given-names>WT</given-names> </name></person-group><article-title>A virtual reality cognitive screening tool based on the six cognitive domains</article-title><source>Alzheimers Dement (Amst)</source><year>2024</year><volume>16</volume><issue>4</issue><fpage>e70030</fpage><pub-id pub-id-type="doi">10.1002/dad2.70030</pub-id><pub-id pub-id-type="medline">39713248</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Negu&#x0163;</surname><given-names>A</given-names> </name><name name-style="western"><surname>Matu</surname><given-names>SA</given-names> </name><name name-style="western"><surname>Sava</surname><given-names>FA</given-names> </name><name name-style="western"><surname>David</surname><given-names>D</given-names> </name></person-group><article-title>Task difficulty of virtual reality-based assessment tools compared to classical paper-and-pencil or computerized measures: a meta-analytic approach</article-title><source>Comput Human Behav</source><year>2016</year><month>01</month><volume>54</volume><fpage>414</fpage><lpage>424</lpage><pub-id 
pub-id-type="doi">10.1016/j.chb.2015.08.029</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dandurand</surname><given-names>F</given-names> </name><name name-style="western"><surname>Shultz</surname><given-names>TR</given-names> </name><name name-style="western"><surname>Onishi</surname><given-names>KH</given-names> </name></person-group><article-title>Comparing online and lab methods in a problem-solving experiment</article-title><source>Behav Res Methods</source><year>2008</year><month>05</month><volume>40</volume><issue>2</issue><fpage>428</fpage><lpage>434</lpage><pub-id pub-id-type="doi">10.3758/brm.40.2.428</pub-id><pub-id pub-id-type="medline">18522052</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gould</surname><given-names>SJJ</given-names> </name><name name-style="western"><surname>Cox</surname><given-names>AL</given-names> </name><name name-style="western"><surname>Brumby</surname><given-names>DP</given-names> </name><name name-style="western"><surname>Wiseman</surname><given-names>S</given-names> </name></person-group><article-title>Home is where the lab is: a comparison of online and lab data from a time-sensitive study of interruption</article-title><source>Hum Comput</source><year>2015</year><volume>2</volume><issue>1</issue><fpage>45</fpage><lpage>67</lpage><pub-id pub-id-type="doi">10.15346/hc.v2i1.4</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>de Leeuw</surname><given-names>JR</given-names> </name><name name-style="western"><surname>Motz</surname><given-names>BA</given-names> </name></person-group><article-title>Psychophysics in a web browser? 
Comparing response times collected with JavaScript and Psychophysics Toolbox in a visual search task</article-title><source>Behav Res</source><year>2016</year><month>03</month><volume>48</volume><issue>1</issue><fpage>1</fpage><lpage>12</lpage><pub-id pub-id-type="doi">10.3758/s13428-015-0567-2</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Armitage</surname><given-names>J</given-names> </name><name name-style="western"><surname>Eerola</surname><given-names>T</given-names> </name></person-group><article-title>Reaction time data in music cognition: comparison of pilot data from lab, crowdsourced, and convenience web samples</article-title><source>Front Psychol</source><year>2019</year><volume>10</volume><fpage>2883</fpage><pub-id pub-id-type="doi">10.3389/fpsyg.2019.02883</pub-id><pub-id pub-id-type="medline">31969849</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bartneck</surname><given-names>C</given-names> </name><name name-style="western"><surname>Duenser</surname><given-names>A</given-names> </name><name name-style="western"><surname>Moltchanova</surname><given-names>E</given-names> </name><name name-style="western"><surname>Zawieska</surname><given-names>K</given-names> </name></person-group><article-title>Comparing the similarity of responses received from studies in Amazon&#x2019;s Mechanical Turk to studies conducted online and with direct recruitment</article-title><source>PLoS One</source><year>2015</year><volume>10</volume><issue>4</issue><fpage>e0121595</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0121595</pub-id><pub-id pub-id-type="medline">25876027</pub-id></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Hilbig</surname><given-names>BE</given-names> </name></person-group><article-title>Reaction time effects in lab- versus web-based research: experimental evidence</article-title><source>Behav Res Methods</source><year>2016</year><month>12</month><volume>48</volume><issue>4</issue><fpage>1718</fpage><lpage>1724</lpage><pub-id pub-id-type="doi">10.3758/s13428-015-0678-9</pub-id><pub-id pub-id-type="medline">26542972</pub-id></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Saunders</surname><given-names>DR</given-names> </name><name name-style="western"><surname>Bex</surname><given-names>PJ</given-names> </name><name name-style="western"><surname>Woods</surname><given-names>RL</given-names> </name></person-group><article-title>Crowdsourcing a normative natural language dataset: a comparison of Amazon Mechanical Turk and in-lab data collection</article-title><source>J Med Internet Res</source><year>2013</year><month>05</month><day>20</day><volume>15</volume><issue>5</issue><fpage>e100</fpage><pub-id pub-id-type="doi">10.2196/jmir.2620</pub-id><pub-id pub-id-type="medline">23689038</pub-id></nlm-citation></ref><ref id="ref53"><label>53</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sauce</surname><given-names>B</given-names> </name><name name-style="western"><surname>Matzel</surname><given-names>LD</given-names> </name></person-group><article-title>The causes of variation in learning and behavior: why individual differences matter</article-title><source>Front Psychol</source><year>2013</year><volume>4</volume><fpage>395</fpage><pub-id pub-id-type="doi">10.3389/fpsyg.2013.00395</pub-id><pub-id pub-id-type="medline">23847569</pub-id></nlm-citation></ref><ref id="ref54"><label>54</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Segen</surname><given-names>V</given-names> </name><name name-style="western"><surname>Avraamides</surname><given-names>M</given-names> </name><name name-style="western"><surname>Slattery</surname><given-names>T</given-names> </name><name name-style="western"><surname>Colombo</surname><given-names>G</given-names> </name><name name-style="western"><surname>Wiener</surname><given-names>JM</given-names> </name></person-group><article-title>Comparable performance on a spatial memory task in data collected in the lab and online</article-title><source>PLoS One</source><year>2021</year><volume>16</volume><issue>11</issue><fpage>e0259367</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0259367</pub-id><pub-id pub-id-type="medline">34843521</pub-id></nlm-citation></ref><ref id="ref55"><label>55</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Casey</surname><given-names>BJ</given-names> </name><name name-style="western"><surname>Trainor</surname><given-names>RJ</given-names> </name><name name-style="western"><surname>Orendi</surname><given-names>JL</given-names> </name><etal/></person-group><article-title>A developmental functional MRI study of prefrontal activation during performance of a go-no-go task</article-title><source>J Cogn Neurosci</source><year>1997</year><month>11</month><volume>9</volume><issue>6</issue><fpage>835</fpage><lpage>847</lpage><pub-id pub-id-type="doi">10.1162/jocn.1997.9.6.835</pub-id><pub-id pub-id-type="medline">23964603</pub-id></nlm-citation></ref><ref id="ref56"><label>56</label><nlm-citation citation-type="web"><article-title>Jamovi (version 2.3) [computer software]</article-title><source>The Jamovi Project</source><year>2022</year><access-date>2025-06-30</access-date><comment><ext-link ext-link-type="uri" 
xlink:href="https://www.jamovi.org">https://www.jamovi.org</ext-link></comment></nlm-citation></ref><ref id="ref57"><label>57</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lavie</surname><given-names>N</given-names> </name><name name-style="western"><surname>Tsal</surname><given-names>Y</given-names> </name></person-group><article-title>Perceptual load as a major determinant of the locus of selection in visual attention</article-title><source>Percept Psychophys</source><year>1994</year><month>03</month><volume>56</volume><issue>2</issue><fpage>183</fpage><lpage>197</lpage><pub-id pub-id-type="doi">10.3758/BF03213897</pub-id></nlm-citation></ref><ref id="ref58"><label>58</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lavie</surname><given-names>N</given-names> </name></person-group><article-title>Perceptual load as a necessary condition for selective attention</article-title><source>J Exp Psychol Hum Percept Perform</source><year>1995</year><volume>21</volume><issue>3</issue><fpage>451</fpage><lpage>468</lpage><pub-id pub-id-type="doi">10.1037//0096-1523.21.3.451</pub-id></nlm-citation></ref><ref id="ref59"><label>59</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cartwright-Finch</surname><given-names>U</given-names> </name><name name-style="western"><surname>Lavie</surname><given-names>N</given-names> </name></person-group><article-title>The role of perceptual load in inattentional blindness</article-title><source>Cognition</source><year>2007</year><month>03</month><volume>102</volume><issue>3</issue><fpage>321</fpage><lpage>340</lpage><pub-id pub-id-type="doi">10.1016/j.cognition.2006.01.002</pub-id><pub-id pub-id-type="medline">16480973</pub-id></nlm-citation></ref><ref id="ref60"><label>60</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lavie</surname><given-names>N</given-names> </name><name name-style="western"><surname>Hirst</surname><given-names>A</given-names> </name><name name-style="western"><surname>de Fockert</surname><given-names>JW</given-names> </name><name name-style="western"><surname>Viding</surname><given-names>E</given-names> </name></person-group><article-title>Load theory of selective attention and cognitive control</article-title><source>J Exp Psychol Gen</source><year>2004</year><month>09</month><volume>133</volume><issue>3</issue><fpage>339</fpage><lpage>354</lpage><pub-id pub-id-type="doi">10.1037/0096-3445.133.3.339</pub-id><pub-id pub-id-type="medline">15355143</pub-id></nlm-citation></ref><ref id="ref61"><label>61</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lavie</surname><given-names>N</given-names> </name></person-group><article-title>Distracted and confused?: Selective attention under load</article-title><source>Trends Cogn Sci (Regul Ed)</source><year>2005</year><month>02</month><volume>9</volume><issue>2</issue><fpage>75</fpage><lpage>82</lpage><pub-id pub-id-type="doi">10.1016/j.tics.2004.12.004</pub-id></nlm-citation></ref><ref id="ref62"><label>62</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rees</surname><given-names>G</given-names> </name><name name-style="western"><surname>Frith</surname><given-names>CD</given-names> </name><name name-style="western"><surname>Lavie</surname><given-names>N</given-names> </name></person-group><article-title>Modulating irrelevant motion perception by varying attentional load in an unrelated task</article-title><source>Science</source><year>1997</year><month>11</month><day>28</day><volume>278</volume><issue>5343</issue><fpage>1616</fpage><lpage>1619</lpage><pub-id 
pub-id-type="doi">10.1126/science.278.5343.1616</pub-id><pub-id pub-id-type="medline">9374459</pub-id></nlm-citation></ref><ref id="ref63"><label>63</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Neokleous</surname><given-names>K</given-names> </name><name name-style="western"><surname>Shimi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Avraamides</surname><given-names>MN</given-names> </name></person-group><article-title>Modeling the effects of perceptual load: saliency, competitive interactions, and top-down biases</article-title><source>Front Psychol</source><year>2016</year><volume>7</volume><fpage>1</fpage><pub-id pub-id-type="doi">10.3389/fpsyg.2016.00001</pub-id><pub-id pub-id-type="medline">26858668</pub-id></nlm-citation></ref><ref id="ref64"><label>64</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Huang</surname><given-names>L</given-names> </name><name name-style="western"><surname>Pashler</surname><given-names>H</given-names> </name></person-group><article-title>Attention capacity and task difficulty in visual search</article-title><source>Cognition</source><year>2005</year><month>01</month><volume>94</volume><issue>3</issue><fpage>B101</fpage><lpage>B111</lpage><pub-id pub-id-type="doi">10.1016/j.cognition.2004.06.006</pub-id><pub-id pub-id-type="medline">15617666</pub-id></nlm-citation></ref><ref id="ref65"><label>65</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bravo</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Nakayama</surname><given-names>K</given-names> </name></person-group><article-title>The role of attention in different visual-search tasks</article-title><source>Percept 
Psychophys</source><year>1992</year><month>09</month><volume>51</volume><issue>5</issue><fpage>465</fpage><lpage>472</lpage><pub-id pub-id-type="doi">10.3758/BF03211642</pub-id></nlm-citation></ref><ref id="ref66"><label>66</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Alaeifard</surname><given-names>M</given-names> </name><name name-style="western"><surname>Safaei</surname><given-names>M</given-names> </name></person-group><article-title>Head movement patterns as predictors of cybersickness in virtual reality games</article-title><source>Int J Adv Hum Comput Interact</source><year>2024</year><month>07</month><day>19</day><volume>2</volume><issue>2</issue><fpage>1</fpage><pub-id pub-id-type="doi">10.1016/j.cognition.2004.06.006</pub-id><!-- NOTE(review): this DOI is identical to ref64 (Huang and Pashler, Cognition 2005) and cannot belong to this 2024 article; verify and replace with the correct DOI --></nlm-citation></ref><ref id="ref67"><label>67</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Weech</surname><given-names>S</given-names> </name><name name-style="western"><surname>Kenny</surname><given-names>S</given-names> </name><name name-style="western"><surname>Lenizky</surname><given-names>M</given-names> </name><name name-style="western"><surname>Barnett-Cowan</surname><given-names>M</given-names> </name></person-group><article-title>Narrative and gaming experience interact to affect presence and cybersickness in virtual reality</article-title><source>Int J Hum Comput Stud</source><year>2020</year><month>06</month><volume>138</volume><fpage>102398</fpage><pub-id pub-id-type="doi">10.1016/j.ijhcs.2020.102398</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Example of a feature search trial used in the Visual Search task of this experimental study. In this trial, participants searched for a gray 2-sided axe (target) among 24 brown 2-sided axes (distractors). This is a trial with a display size of 25. 
The red outline is added for illustration purposes and was not present in the actual display viewed by participants.</p><media xlink:href="formative_v9i1e65836_app1.png" xlink:title="PNG File, 428 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Mean span scores in the Corsi block-tapping task across the 3 administration conditions. Span scores reflect the longest sequence of targets correctly recalled by participants. Error bars represent 95% confidence intervals.</p><media xlink:href="formative_v9i1e65836_app2.png" xlink:title="PNG File, 22 KB"/></supplementary-material></app-group></back></article>