<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Form Res</journal-id><journal-id journal-id-type="publisher-id">formative</journal-id><journal-id journal-id-type="index">27</journal-id><journal-title>JMIR Formative Research</journal-title><abbrev-journal-title>JMIR Form Res</abbrev-journal-title><issn pub-type="epub">2561-326X</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v10i1e87121</article-id><article-id pub-id-type="doi">10.2196/87121</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Opportunities and Challenges of Generative AI in Postgraduate Health Professions Education Assessments From Educator and Learner Perspectives: Qualitative Study</article-title></title-group><contrib-group><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Phillips</surname><given-names>Carys</given-names></name><degrees>BMBS, MSc</degrees><xref ref-type="aff" rid="aff1"/><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" corresp="yes" equal-contrib="yes"><name name-style="western"><surname>Harrison</surname><given-names>David</given-names></name><degrees>MA, PhD</degrees><xref ref-type="aff" rid="aff1"/><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib></contrib-group><aff id="aff1"><institution>Research Department of Medical Education, University College London</institution><addr-line>Education RCP, 11 St Andrews 
Place</addr-line><addr-line>London</addr-line><country>United Kingdom</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Sarvestan</surname><given-names>Javad</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Ting</surname><given-names>Eon</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Gupta</surname><given-names>Subhas</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to David Harrison, MA, PhD, Research Department of Medical Education, University College London, Education RCP, 11 St Andrews Place, London, WC1E 6BT, United Kingdom, 44 7713003204; <email>david.harrison@ucl.ac.uk</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>all authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>6</day><month>5</month><year>2026</year></pub-date><volume>10</volume><elocation-id>e87121</elocation-id><history><date date-type="received"><day>04</day><month>11</month><year>2025</year></date><date date-type="rev-recd"><day>29</day><month>03</month><year>2026</year></date><date date-type="accepted"><day>01</day><month>04</month><year>2026</year></date></history><copyright-statement>&#x00A9; Carys Phillips, David Harrison. Originally published in JMIR Formative Research (<ext-link ext-link-type="uri" xlink:href="https://formative.jmir.org">https://formative.jmir.org</ext-link>), 6.5.2026. 
</copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Formative Research, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://formative.jmir.org">https://formative.jmir.org</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://formative.jmir.org/2026/1/e87121"/><abstract><sec><title>Background</title><p>The application of artificial intelligence (AI) is increasingly valuable as a tool and assistant in many areas of clinical and academic medicine. Generative AI (GenAI) creates new content used by large language models, which can generate language that strongly resembles or even improves on that of humans. Learners and educators in many areas of education are using GenAI for essays and assessments, raising issues regarding learning and assessment. GenAI is also raising new concerns in health professions education (HPE), an area of health professions training that sometimes has different aims and assessment methods compared to its clinical counterparts. 
HPE needs to assess levels of knowledge and understanding of pedagogy, and the use of GenAI presents challenges to its current assessments, which are predominantly written.</p></sec><sec><title>Objective</title><p>The study aimed to investigate educators&#x2019; and learners&#x2019; perspectives on the opportunities and challenges presented by GenAI in postgraduate HPE assessments. It particularly focused on perspectives of how GenAI may influence the future of assessment and essay-based assessments in HPE.</p></sec><sec sec-type="methods"><title>Methods</title><p>Informed by a constructivist paradigm, a qualitative approach was adopted, undertaking 8 semistructured interviews conducted via Microsoft Teams. Purposive sampling ensured a mixture of educators and learners in current HPE courses from a range of health care professions. Data were thematically analyzed.</p></sec><sec sec-type="results"><title>Results</title><p>There was no difference between educator and learner perspectives. Four themes were identified: AI is here, students are at a disservice if we do not embrace it; AI as an opportunity to rethink HPE assessments; AI is a &#x201C;gray area&#x201D;; and AI is fallible.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>The findings present AI as an external catalyst, highlighting the current internal desire for assessment change within HPE. It offers opportunities for creative, authentic assessments that reflect real-life academic and clinical practice, aiming to develop competent future HPE educators and keep courses relevant. 
These findings contribute to the debate around the future potential and development of AI in HPE assessments.</p></sec></abstract><kwd-group><kwd>assessments</kwd><kwd>AI</kwd><kwd>artificial intelligence</kwd><kwd>GenAI</kwd><kwd>generative artificial intelligence</kwd><kwd>postgraduate</kwd><kwd>health professions education</kwd><kwd>medical education</kwd><kwd>written assessment</kwd><kwd>essay</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><sec id="s1-1"><title>Background</title><p>National Health Service England&#x2019;s 2023 <italic>Long Term Workforce Plan</italic> outlines the need to develop an increasing number of skilled health care professionals to address the projected workforce shortfall from imminent &#x201C;demographic pressures&#x201D; and &#x201C;changing burden of disease&#x201D; [<xref ref-type="bibr" rid="ref1">1</xref>]. Subsequently, education and training needs are predicted to increase between 50% and 65% by 2030 and 2031 across all health care professions [<xref ref-type="bibr" rid="ref1">1</xref>], which necessitates a &#x201C;high-quality educator workforce&#x201D; [<xref ref-type="bibr" rid="ref2">2</xref>]. More recently, the <italic>10 Year Health Plan for England</italic> highlighted plans to &#x201C;modernize postgraduate medical education,&#x201D; as well as work with educational institutes to &#x201C;overhaul education and training curricula&#x201D; for health care professions, including training in artificial intelligence (AI) use [<xref ref-type="bibr" rid="ref3">3</xref>]. 
This increasing demand for modern educators means that health professions education (HPE) programs must be confident they are appropriately assessing whether learners achieve required professional education standards in the light of a constantly changing world.</p><p>AI plays a crucial role in this process, both by driving change and providing advanced tools and technologies that support personalized learning, efficient assessment, and continuous improvement in educational practices. More specifically, &#x201C;generative AI&#x201D; (GenAI) is now widely used and rapidly developing, capable of creating new content, including text, images, and other media. Within GenAI, natural language processing allows computers to understand human language; analyze, process, and interpret text; extract meaning; and perform tasks previously thought to require real intelligence [<xref ref-type="bibr" rid="ref4">4</xref>]. Large language models (LLMs) are a further type of GenAI that perform natural language processing tasks by using &#x201C;artificial intelligence algorithms to generate language that resembles that produced by humans&#x201D; [<xref ref-type="bibr" rid="ref4">4</xref>].</p><p>GenAI has significant ramifications within education, including HPE, which have become noticeable since the increased use following the release of the updated LLM by OpenAI in November 2022, ChatGPT-3.5. There has been subsequent development of LLMs internationally. Concerns arise regarding inequity for learners in barriers accessing GenAI, but these will likely diminish with their ongoing development, driving down costs and increasing accessibility. Crucially, the use of GenAI has triggered concerns among many HPE educators around how AI will influence teaching and learning and, especially, assessments. Current forms of assessment in HPE, especially at master&#x2019;s level, are traditionally essay-based, incorporating reports, literature reviews, reflective pieces, and theses. 
By contrast, clinical qualifications tend to use &#x201C;objective&#x201D; examinations and work-based assessments. Hence, within HPE, there are increased concerns regarding potential academic misconduct in essay-type assessments if students use AI [<xref ref-type="bibr" rid="ref5">5</xref>] and the implications for the quality and competence of these future educators when they enter the workforce.</p></sec><sec id="s1-2"><title>Cheating and AI</title><p>Concerns about cheating in assessments are not new. Essay-based assessments have always been open to &#x201C;contract cheating,&#x201D; when a student submits work written by someone else, but GenAI has made this more accessible [<xref ref-type="bibr" rid="ref6">6</xref>] and has blurred the lines between cheating, plagiarism, and a &#x201C;helping hand.&#x201D; Among academics, vacillation is rife over whether GenAI is a &#x201C;game-changer&#x201D; and represents an end to the essay-based assessment or adds little to the multiple ways students can already cheat [<xref ref-type="bibr" rid="ref7">7</xref>]. Besides potential academic misconduct, there are other considerations related to the use of AI within HPE assessment: privacy risks involving sensitive data [<xref ref-type="bibr" rid="ref8">8</xref>], bias arising from the data on which the AI systems are trained [<xref ref-type="bibr" rid="ref9">9</xref>], and &#x201C;hallucinations&#x201D; whereby AI creates false information and fabricates references [<xref ref-type="bibr" rid="ref10">10</xref>].</p><p>&#x201C;AI-output detectors&#x201D; detect the use of AI in academic work, with varying results against humans in detecting AI-generated questions [<xref ref-type="bibr" rid="ref11">11</xref>], but they have successfully discriminated between most original and AI-generated abstracts [<xref ref-type="bibr" rid="ref12">12</xref>]. 
Indeed, there are platforms to help bypass AI-output detectors, so in this escalating arms race, the speed of AI&#x2019;s sophisticated evolution makes it almost impossible for output detectors to stay ahead. This increases concerns about academic misconduct or misapplication in HPE assessments and questions whether educators or software will be able to detect its use.</p></sec><sec id="s1-3"><title>AI Literacy</title><p>AI literacy is &#x201C;the ability to understand, use, monitor, and critically reflect on AI applications without necessarily being able to develop AI models themselves&#x201D; [<xref ref-type="bibr" rid="ref13">13</xref>] and is necessary for both learners and educators to effectively use AI within assessments. Although self-ratings for AI-knowledge in health professions students are low [<xref ref-type="bibr" rid="ref14">14</xref>], learner perceptions toward AI are positive [<xref ref-type="bibr" rid="ref15">15</xref>], wanting AI to be incorporated into medical school curricula [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref17">17</xref>]. If AI is formally incorporated into the curriculum, this will affect both <italic>how</italic> and <italic>what</italic> we assess, and educators will need to feel confident in AI literacy, as there are a multitude of ways to incorporate AI into assessment [<xref ref-type="bibr" rid="ref18">18</xref>]. Tlili et al [<xref ref-type="bibr" rid="ref19">19</xref>] identified the need to &#x201C;upskill&#x201D; educators on AI&#x2019;s practicalities and how to design and teach it in curricula. Educator training would improve AI literacy and address faculty concerns related to using AI, such as misinformation or academic misconduct [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>]. 
Current assessments within HPE are unlikely to be suitable to assess AI literacy, yet not incorporating AI in learning and assessment will likely impede future educators, learners, and patients, given AI&#x2019;s growing presence.</p></sec><sec id="s1-4"><title>Communities of Practice</title><p>Communities of practice (CoP) are &#x201C;groups of people who share a concern or a passion for something they do and learn how to do it better as they interact regularly&#x201D; [<xref ref-type="bibr" rid="ref22">22</xref>], and here it is used as a framework to consider how HPE forms CoP to co-construct knowledge and stay &#x201C;ahead of the curve&#x201D; in the face of the complex challenges, changes, and opportunities AI presents.</p><p>This research focuses on the views of opportunities and challenges within HPE assessment of GenAI (AI here refers to GenAI unless specified otherwise), specifically LLMs. The approach analyzes participants&#x2019; perspectives on the concept of LLMs in the context of HPE assessments. Discourse within the educator community is currently evolving and full of uncertainty, particularly in exploring how AI may specifically influence written HPE assessments, so we ask the question: what are educators&#x2019; and learners&#x2019; perspectives on the opportunities and challenges presented by GenAI in postgraduate HPE assessments?</p></sec></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Methodology</title><p>This research focused on exploring perspectives from a constructivist paradigm, using qualitative methodology. It aimed to explore educators&#x2019; and learners&#x2019; perceptions of how AI may influence HPE assessments, rather than testing knowledge of AI. Participants&#x2019; experiences are explored with the idea of effecting potential change&#x2014;whether in an approach to future assessment or attitudes to AI within the HPE community. 
Methods are reported in line with the Consolidated Criteria for Reporting Qualitative Research (COREQ) checklist (<xref ref-type="supplementary-material" rid="app2">Checklist 1</xref>) [<xref ref-type="bibr" rid="ref23">23</xref>].</p></sec><sec id="s2-2"><title>Sampling and Recruitment</title><p>Educators currently teaching or assessing a postgraduate HPE course and current learners enrolled in a postgraduate HPE course were invited via purposive sampling to participate in semistructured interviews exploring educators&#x2019; and learners&#x2019; perspectives. Educators and students from different HPE courses across the United Kingdom were invited to avoid response bias specific to perspectives from a single institution. As GenAI was a relatively new concept at the time of interviews (January to March 2024), the inclusion criteria specified that only educators and postgraduate learners currently involved in a course were invited to participate. Exclusion criteria were any undergraduate students or individuals either teaching or being taught by CP. As there was no single overall group from which to recruit participants, purposive sampling ensured the invitation reached those who met the criteria. Participants were recruited through an email invitation sent by CP, and participants were invited to forward the invitation to colleagues to encourage snowball sampling. CP and administrators of appropriate HPE social media groups distributed the invitations through their respective health profession education social media channels (Microsoft Teams and WhatsApp) to widen the invitation&#x2019;s reach. It was unclear how many were approached via this method, as there are multiple educators and learners on such channels.</p></sec><sec id="s2-3"><title>Research Team and Reflexivity</title><p>CP held roles as clinician, researcher, and educator. She was both an educator and learner on different postgraduate HPE courses. 
The research was for her master&#x2019;s thesis (for MSc in medical education at University College London/Royal College of Physicians), with prior experience in qualitative research. DH was her supervisor and educationalist.</p><p>Specific ethical implications were considered, as recruitment methods included inviting some participants who were colleagues of the interviewer (CP). The participant information leaflet clearly outlined the intent of the research, and alongside the email invitation, it was made clear that the study was part of a master&#x2019;s thesis and participation was voluntary. The invitation was also disseminated by administrators via wider social media channels to increase the uptake of participants, especially those not known to the interviewer.</p><p>Reflexively, CP was aware that their dual role as a current learner and as an educator with an interest in assessment and AI could potentially be intimidating in interviews if participants did not feel knowledgeable about AI, thereby creating a barrier to honesty. However, the dual role could also have been advantageous if interviewees felt that CP had an understanding of their role and, therefore, may have felt more comfortable.</p><p>DH was the supervisor for this research and an educationalist with assessment responsibilities and an interest in how AI is impacting assessment. DH was very aware of the potential for a conflict of interest&#x2014;particularly if current assessment methods were perceived as no longer fit for purpose. DH had no knowledge or contact with the participants, and none of them were involved in the assessment process of any programs involving DH.</p></sec><sec id="s2-4"><title>Methods of Data Collection</title><p>Semistructured interviews were conducted by CP as the sole interviewer on Microsoft Teams and auto-transcribed. The transcripts were then pseudonymized and edited by the interviewer to ensure they were transcribed verbatim. 
Interviews were audio-video recorded with the camera off to encourage participants to speak freely and to increase confidentiality, and recordings were deleted at the end of the withdrawal period. Only the interviewer (CP) and the participant were present during the interview, and there were no repeat interviews. CP made research notes during the interviews to refer back to during analysis. The length of interviews can be found in the Results section. The interview schedule (<xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>) was developed by CP to answer the research questions while allowing flexibility, with initial questions being reviewed and revised based on feedback from DH. The interview schedule started with more open-ended questions on the participants&#x2019; current roles and HPE assessments, before exploring their knowledge, perceptions, and use of AI both personally and within their communities. Subsequent questions were more structured to address the research questions and examined opportunities and challenges of AI within HPE assessments for both educators and learners. The interview schedule was not piloted due to concerns of low recruitment but was revised based on feedback from DH, and the semistructured nature of the interview allowed for sufficient flexibility. There was no member checking of transcripts, both for feasibility and to avoid participants reconsidering their original views.</p></sec><sec id="s2-5"><title>Data Analysis</title><p>Coding was undertaken by 1 data coder (CP) using NVivo 14 (Lumivero), as part of an independent master&#x2019;s thesis. Data were analyzed using Braun and Clarke&#x2019;s thematic analysis [<xref ref-type="bibr" rid="ref24">24</xref>], following the 6-phase process and using a predominantly inductive approach to reflect the relative novelty of the topic at the time.</p><p>In phase 1, CP immersed herself in the data by editing and rereading the transcripts. 
In phase 2, in keeping with Braun and Clarke&#x2019;s [<xref ref-type="bibr" rid="ref24">24</xref>] thematic analysis, &#x201C;codes&#x201D; were given to lengths of text relevant to the research question. Data that captured similar meanings were assigned to the same code. In phase 3, codes were then constructed into subthemes and then grouped under themes. There was no participant checking of codes. Associated codes from themes and subthemes helped identify illustrative quotes outlined in the Results section. In phase 4, themes were reviewed, and as CP was the only coder, there was no interresearcher triangulation of themes and, therefore, no disagreements. The limitations of having a single coder in triangulation and the potential for bias were acknowledged. However, data analysis started after a couple of interviews had been conducted and transcribed, and emerging themes and subthemes were continually revised and reviewed as further interviews were conducted and subsequently analyzed. Research notes made during interviews allowed for further refining of themes.</p><p>In phase 5, the datasets were reviewed, analyzed, and visualized on a thematic map (<xref ref-type="fig" rid="figure1">Figure 1</xref>), enabling further refinement of coherent themes and subthemes without repetition. Phase 6 involved writing up the analysis. Reflexively, CP&#x2019;s lived experience straddling educator and learner roles while undertaking HPE assessments in an evolving AI era may have helped co-construct knowledge and understanding and enabled development and depth to the codes and themes.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Thematic map. 
AI: artificial intelligence; HPE: health professions education.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="formative_v10i1e87121_fig01.png"/></fig></sec><sec id="s2-6"><title>Ethical Considerations</title><p>Data were stored securely on a university password-protected platform. Ethics approval was granted by the University College London Research Ethics Committee (ethics 23511/011). Participation was voluntary and without compensation, and informed consent was obtained from all participants. Consent for publication was obtained from all participants; no identifiable data were used.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Overview</title><p>Eight semistructured interviews were conducted via Microsoft Teams between January and March 2024 (4 educators [E] and 4 learners [L]), lasting 30 to 60 minutes (<xref ref-type="table" rid="table1">Table 1</xref>). Six doctors and 2 nurses across 6 different higher institutes within the United Kingdom were interviewed. Data saturation, defined as informational redundancy, was reached after 8 interviews. The participants had a range of prior AI knowledge; however, the distinction between learners and educators was blurred, as all educators had previously been learners in HPE, and all learners were currently educators in some capacity, which may have led to more analogous perspectives. The dual role of interviewees as both educators and learners may, therefore, have led to theme convergence.</p><p>The iterative process of data analysis reduced 15 subthemes to 10 and 6 themes to 4. 
The four themes are (1) AI as an opportunity to rethink HPE assessments; (2) AI is here, students are at a disservice if we do not embrace it; (3) AI is a gray area; and (4) AI is fallible.</p><p>As seen in the thematic map (<xref ref-type="fig" rid="figure1">Figure 1</xref>), some themes and subthemes have strong associations (bidirectional arrows) and affiliations (straight lines), demonstrating the interconnection between concepts.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Participant demographics.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Participant</td><td align="left" valign="bottom">Length of interview (mean 35.5 min, SD 10.16)</td><td align="left" valign="bottom">Clinical background</td></tr></thead><tbody><tr><td align="left" valign="top">L1</td><td align="char" char="." valign="top">32 min</td><td align="left" valign="top">Nurse</td></tr><tr><td align="left" valign="top">L2</td><td align="char" char="." valign="top">41 min</td><td align="left" valign="top">Doctor</td></tr><tr><td align="left" valign="top">L3</td><td align="char" char="." valign="top">57 min</td><td align="left" valign="top">Doctor</td></tr><tr><td align="left" valign="top">L4</td><td align="char" char="." valign="top">24 min</td><td align="left" valign="top">Doctor</td></tr><tr><td align="left" valign="top">E1</td><td align="char" char="." valign="top">25 min</td><td align="left" valign="top">Doctor</td></tr><tr><td align="left" valign="top">E2</td><td align="char" char="." valign="top">33 min</td><td align="left" valign="top">Doctor</td></tr><tr><td align="left" valign="top">E3</td><td align="char" char="." valign="top">30 min</td><td align="left" valign="top">Nurse</td></tr><tr><td align="left" valign="top">E4</td><td align="char" char="." 
valign="top">42 min</td><td align="left" valign="top">Doctor</td></tr></tbody></table></table-wrap><sec id="s3-1-1"><title>Theme 1: AI as an Opportunity to Rethink HPE Assessments</title><sec id="s3-1-1-1"><title>Overview</title><p>All participants, except 1, had only written assessments in their HPE course (7/8, 88%): predominantly essays, with some research proposals and literature reviews. AI represented an overdue opportunity to &#x201C;rethink and go back to the drawing board about why we&#x2019;re assessing and what people are learning and how&#x201D; (L2). HPE assessments have remained static, and all participants apart from 1 felt that the established method of predominantly essay-based assessments was not the most appropriate way to assess educators for the modern world, which included rising AI use and difficulty detecting its use, exemplified by L3: &#x201C;Are they [assessments] valid or are we simply just gonna get a whole load of ChatGPT generated work?&#x201D;</p><p>The desire for internal change existed within HPE prior to recent external pressure of AI, and all participants (8/8, 100%) wanted HPE assessments that constructively aligned with the development of critical-thinking educators, proficient in pedagogy, especially in practical application.</p><p>There were 2 subthemes: HPE assessments need to evolve and assessment design or creativity deters AI reliance.</p></sec><sec id="s3-1-1-2"><title>Subtheme 1.1: HPE Assessments Need to Evolve</title><p>There was a consensus that assessments in HPE should evolve from being predominantly essay-based for a multitude of reasons: written assessments were easier to produce using AI, disadvantaged learners who struggled with writing were often assessed on writing ability as opposed to content, lacked creativity, and might not demonstrate knowledge or reflect engagement in the course.</p><p>All participants except 1 (88%) felt it was important to retain some written assessments, such as essay writing, to hone 
critical and academic writing skills and prepare learners for future academic pursuits while also acknowledging the educational impact written assessments had in driving their learning. Keeping assessments as they are is perceived as the easier option, with less resistance and easier to mark than potential alternatives: &#x201C;[T]here&#x2019;s no criticism of people that are just continuing and not...putting their heads above the parapet because at the end of the day it is a complex task&#x201D; (L2)</p></sec><sec id="s3-1-1-3"><title>Subtheme 1.2: Assessment Design or Creativity Deters AI Reliance</title><p>Lack of diversity in assessment types combined with lack of time may drive learners to turn to AI, described by L1: &#x201C;When you go in and you go &#x2018;Oh no, another 4000 word essay&#x2019;...you're just like, &#x2018;Oh my...I don't wanna do this&#x2019; and therefore you're more likely to turn to...generative AI.&#x201D;</p><p>Careful assessment design and creativity to incorporate AI &#x201C;so that it doesn&#x2019;t diminish learning and assessment&#x201D; (L1) will also create assessments that are less amenable to being produced purely by AI, without having to ban AI use outright.</p><p>Suggestions were made for assessments to better reflect real life (<xref ref-type="other" rid="box1">Textbox 1</xref>); within research, this may be writing abstracts, posters, or journal articles, and within clinical teaching, collaboration and group work. However, these &#x201C;might be challenging for [educators] because it would be a complete fundamental change&#x201D; (L2). Lack of creativity in current assessments may drive AI use, and further steps could be taken to demonstrate student input, such as in presentations or explaining how they interacted with AI. 
Some educators described how they were reviewing their assessments in light of AI, but most were currently only at the discussion stage.</p><boxed-text id="box1"><title> Suggested alternative health professions education assessment types.</title><p>The following are the health professions education assessment types:</p><list list-type="bullet"><list-item><p>Viva (oral examination)</p></list-item><list-item><p>Poster presentation</p></list-item><list-item><p>Video</p></list-item><list-item><p>Collaborative working</p></list-item><list-item><p>Oral presentation</p></list-item><list-item><p>Teaching observation and feedback</p></list-item><list-item><p>Project work or group research</p></list-item><list-item><p>Abstract or journal article</p></list-item></list></boxed-text></sec></sec></sec><sec id="s3-2"><title>Theme 2: AI Is Here, Students Are at a Disservice If We Do Not Embrace It</title><sec id="s3-2-1"><title>Overview</title><p>AI is part of everyday life, both educationally and clinically. No participants described their current HPE course as either incorporating or teaching AI use, with concern by some participants that educators may not acknowledge AI, allowing courses to continue as they are, perhaps driven partly by fear. 
Participants felt that if AI is not incorporated into teaching and, therefore, assessment, learners would be placed at a disadvantage, and the HPE course may be subsequently less relevant and attractive to future learners.</p><disp-quote><p>I don&#x2019;t think it&#x2019;s right to say, don&#x2019;t use generative AI at all, because I just don&#x2019;t think that&#x2019;s realistic.</p><attrib>E4</attrib></disp-quote><p>There were 3 subthemes: incorporating AI into assessment, increasing efficiency and learning, and leveling the playing field.</p></sec><sec id="s3-2-2"><title>Subtheme 2.1: Incorporating AI Into Assessment</title><p>A total of 2 (50%) learners had used AI within their HPE course for learning but not summative assessment; 1 did not use it in their assessment, as they felt it was unnecessary. In total, 2 (50%) educators described using it in their teaching and 1 using it to aid assessment design. Participants discussed how AI could be incorporated into the assessment process (<xref ref-type="other" rid="box2">Textbox 2</xref>).</p><boxed-text id="box2"><title> Suggested uses by participants of artificial intelligence in health professions education assessments.</title><p>The following are the suggested uses by participants of artificial intelligence (AI) in health professions education assessments:</p><list list-type="bullet"><list-item><p>AI to assist in image creation or graphical depiction of research</p></list-item><list-item><p>AI to create a poster to present work or research</p></list-item><list-item><p>Integration of AI within assessment&#x2014;marks allocated to AI use or combine AI input with human oversight or feedback</p></list-item><list-item><p>Demonstrate critical analysis of AI output (ie, demonstrate reworking of prompts and rationale; editing, fact-checking, or reflection on AI output)</p></list-item><list-item><p>Use AI as an exemplar assessment to critique (may be an example of &#x201C;bare minimum 
pass&#x201D;)</p></list-item><list-item><p>AI to assist educators to mark assessments and provide learner feedback</p></list-item><list-item><p>Provide learners with personalized feedback on draft assessment</p></list-item></list></boxed-text></sec><sec id="s3-2-3"><title>Subtheme 2.2: Increase Efficiency and Learning</title><p>Participants suggested how AI can increase efficiency in assessments for both educators and learners (<xref ref-type="other" rid="box3">Textbox 3</xref>). The educational impact of incorporating AI into the assessment process was described by L1: &#x201C;(AI) would sort of relieve the pressure of time, so then you can do those more exciting, interesting things...and in turn makes students more motivated.&#x201D;</p><p>A total of 6 (75%) participants had used AI in some capacity within HPE. <xref ref-type="other" rid="box4">Textbox 4</xref> outlines how they used AI in HPE assessments to facilitate learning, highlighting similarities between educators and learners.</p><p>Participants discussed how HPE courses aimed to develop competent health professions educators to teach health professions with the ultimate aim of improving patient care.</p><p>Participants also highlighted the need to retain human educator oversight and involvement within HPE and assessment, to avoid losing the &#x201C;human touch,&#x201D; as AI alone would be too impersonal, decreasing motivation and learning. 
As described by E4: &#x201C;I don&#x2019;t think [AI] would get rid of jobs completely.&#x201D;</p><p><xref ref-type="table" rid="table2">Table 2</xref> outlines participant suggestions on how AI could be used alongside the learner or educator to facilitate assessment <italic>as</italic>, <italic>for,</italic> and <italic>of learning</italic>, as similarly seen in the literature [<xref ref-type="bibr" rid="ref25">25</xref>].</p><boxed-text id="box3"><title> Suggestions on how artificial intelligence may increase efficiency.</title><p>The following are the suggestions on how artificial intelligence may increase efficiency:</p><list list-type="bullet"><list-item><p>Conduct literature search, identify papers</p></list-item><list-item><p>Project outlines</p></list-item><list-item><p>Access information, compile information</p></list-item><list-item><p>Summarize concepts or ideas</p></list-item><list-item><p>Write first draft (eg, introduction, reference)</p></list-item><list-item><p>Develop presentations, poster, or slides</p></list-item><list-item><p>Mark assessments</p></list-item><list-item><p>Provide evaluation or feedback</p></list-item></list></boxed-text><boxed-text id="box4"><title> How AI is being used in health professions education assessments.</title><p>The following illustrates the ways in which artificial intelligence (AI) is being used in health professions education assessments:</p><list list-type="bullet"><list-item><p>Offer new perspectives and ideas</p></list-item><list-item><p>As an alternative to search engines</p></list-item><list-item><p>To test and check hypotheses</p></list-item><list-item><p>To confirm and check current knowledge</p></list-item><list-item><p>Generate an answer to a question</p></list-item><list-item><p>To provide a critical analysis</p></list-item><list-item><p>To generate initial ideas</p></list-item><list-item><p>Rephrasing, editing, finessing</p></list-item><list-item><p>Prompt and creative outlet in assessment 
creation</p></list-item></list></boxed-text><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Suggested uses of AI<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup>.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">AI use</td><td align="left" valign="bottom">Practical suggestions</td></tr></thead><tbody><tr><td align="left" valign="top">AI as a coteacher</td><td align="left" valign="top">To clarify or explain concepts that are unclear</td></tr><tr><td align="left" valign="top">AI as a tutor or mentor</td><td align="left" valign="top">To review work and provide suggestions</td></tr><tr><td align="left" valign="top">AI to encourage self-directed learning</td><td align="left" valign="top">To provide guidance that can enable self-directed learning (assessment as learning)</td></tr><tr><td align="left" valign="top">AI as an assistant</td><td align="left" valign="top">To produce an initial draft or outline that can be refined or edited</td></tr><tr><td align="left" valign="top">AI as an aid (for both educator and learner)</td><td align="left" valign="top">Assessments may be produced by AI then refined by learner, or be the &#x201C;first marker&#x201D; for assessors</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-2-4"><title>Subtheme 2.3: Level the Playing Field</title><p>There are opportunities for AI to help level the playing field, such as for students with English as a second language, as also seen in other literature [<xref ref-type="bibr" rid="ref26">26</xref>]. It may also &#x201C;offer a bit more objectivity [and] consistency&#x201D; (E4) and remove any conscious or unconscious bias from assessors if used in marking. 
This was particularly relevant as participants felt that marking of essays in HPE lacked objectivity and reliability.</p></sec></sec><sec id="s3-3"><title>Theme 3: AI Is a &#x201C;Gray Area&#x201D;</title><sec id="s3-3-1"><title>Overview</title><p>All participants (8/8, 100%) felt that AI use and application within HPE assessments was a &#x201C;gray area&#x201D; and felt a lack of &#x201C;formal guidance&#x201D; (E2) from an institutional level. Half the educators (2/4, 50%) and 3 (3/4, 75%) learners knew their institution&#x2019;s stance on AI use within assessment, but many still felt uncertain about how to tangibly translate it to practice. E1 states: &#x201C;no one quite knows what we should and shouldn't be doing.&#x201D; Learners lacked confidence and wanted guidance from educators to ensure they did not get accused of academic misconduct, while educators found themselves relying on learners for guidance or education on AI too. The lack of clarity and confusion on AI use in assessment was identified as a loophole, meaning that learners could potentially use AI with a &#x201C;get-out clause,&#x201D; resulting in inequality if some students are using it and others are not.</p><p>There were concerns about whether AI use constituted plagiarism, due to a lack of familiarity and clarity regarding AI application and authorship. This created a sense of unease among most participants, making educators and learners reluctant to use it, even if permitted, for fear of incidental academic misconduct. 
Clear direction and boundaries on AI use had to come from an institutional level, in collaboration with subject matter experts and educators, but participants were concerned that &#x201C;getting stakeholder buy-in would be quite a hard thing&#x201D; (E4).</p><p>There were 4 subthemes: difficulty in detecting work by AI, knowledge gap, fear of the unknown, and may cause inequality.</p></sec><sec id="s3-3-2"><title>Subtheme 3.1: Difficulty in Detecting Work by AI</title><p>There was concern from both learners and educators about the difficulty of detecting AI in HPE assessments, with participants recommending the implementation of AI detection tools. However, participants were aware that AI detectors were often not used or were fallible. Some learners had to sign a statement of originality, while some universities allowed the use of AI with appropriate referencing. However, there was confusion among participants about &#x201C;how to use [AI] effectively&#x201D; (E4); &#x201C;if they [students] are even allowed to use it or is it a type of plagiarism?&#x201D; (E4); and if referenced, if that would be &#x201C;good enough&#x201D; (L4).</p><p>Participants noticed a formulaic &#x201C;hallmark&#x201D; style to AI output. Yet educators did not feel confident that they would be able to identify whether AI had been used in assessments, suggesting they would rely on knowing the student&#x2019;s voice or identifying phantom references to identify AI use.</p></sec><sec id="s3-3-3"><title>Subtheme 3.2: Knowledge Gap</title><p>The knowledge gap was a cause for concern among all participants, with both the newness of AI and fear of the unknown driving a wedge between those who have embraced it and those who have not. No learners described AI being formally taught on their HPE course, and those perceived as knowledgeable or familiar with AI were deemed more technologically savvy and self-taught. 
Most participants had not used AI as they felt &#x201C;underconfident&#x201D; (L4), and the more AI-confident participants displayed an innate interest in discovering AI&#x2019;s capabilities.</p><p>There was concern from some educators about the knowledge gap in AI, both personally and among their students. Only 1 (25%) educator had discussed AI with HPE colleagues and reported their colleagues were fearful and uncertain of its use, so knowledge was not being shared or cocreated among communities. Educators thought that learners, especially those who were younger, may have more knowledge on AI and that faculty &#x201C;need to keep up&#x201D; (E4) and &#x201C;upskill&#x201D; (E1).</p><p>There was mixed perception regarding whether learners or educators in HPE were more AI literate, with a likely range of knowledge within both cohorts. In this study, neither group demonstrated superior AI knowledge. Positively, both learners and educators were keen for teaching on AI at an institutional level, especially as educators felt that the students were expecting them to be up to date and teaching them. Participants wanted guidance on AI use: learners from educators and educators from their institutions&#x2014;in the form of policy and AI literacy workshops.</p></sec><sec id="s3-3-4"><title>Subtheme 3.3: Fear of the Unknown</title><p>AI&#x2019;s newness created fear and uncertainty for both educators and learners in both how and when to use it, and the inability to keep up with its constant evolution. Due to a lack of knowledge, they felt scared not only of how to use it within HPE assessment but also of getting in &#x201C;trouble&#x201D; for using it.</p></sec><sec id="s3-3-5"><title>Subtheme 3.4: May Cause Inequality</title><p>Learners with more knowledge or access to AI may have an advantage within HPE assessment and &#x201C;create a bit of an unfair playing ground&#x201D; (E2). 
Rather than leveling opportunities, AI may further drive inequality within HPE assessments because those who can afford more advanced AI platforms or have higher AI literacy levels may be better able to use it more successfully and evade detection if required.</p></sec></sec><sec id="s3-4"><title>Theme 4: AI Is Fallible</title><sec id="s3-4-1"><title>Overview</title><p>Participants discussed the merits and drawbacks of AI, specifically the inability to trust the output &#x201C;because it&#x2019;s never perfect&#x201D; (L1), often requiring a human to fact-check and review the output, as &#x201C;50% of it is excellent, 50% of it is rubbish&#x201D; (E1).</p><p>There was 1 subtheme: impede learning.</p><p>Participants were very cautious of AI and felt unable to trust its output, partly due to &#x201C;hallucinations.&#x201D; Participants highlighted concerns that some learners may not be aware they must critically appraise AI output, especially if in a rush or under time pressure, which would further learner inequality and &#x201C;gray areas&#x201D; based on AI literacy.</p></sec><sec id="s3-4-2"><title>Subtheme 4.1: Impede Learning</title><p>There was concern about the educational impact of assessments, as AI use within HPE assessment may not demonstrate learning. If learners use AI to &#x201C;generate their assignment...are they going to learn anything?&#x201D; (E3), they could potentially pass an HPE course with minimal input or knowledge. 
HPE degrees should only be awarded to those &#x201C;deserving of that degree...(who) display a certain level of original ability&#x201D; (E2). However, some participants felt that AI cannot demonstrate high-level thinking or criticality, so the threat was lessened.</p><p>Participants highlighted the irony that educators may be using AI to mark AI-generated assessments; hence, learners may be able to pass an HPE course with minimal commitment or learning if only written assessments are required.</p></sec></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><sec id="s4-1-1"><title>Implications for Assessment Design</title><p>AI highlights the internal need for a radical overhaul of HPE more broadly, with assessments redesigned to reflect this. AI, alongside ever-increasing medical knowledge, represents the need to transition from memorizing information to knowledge utilization [<xref ref-type="bibr" rid="ref27">27</xref>] and has provided the opportunity to reconsider HPE assessments, while incorporating AI, to upskill learners. Reconsidering HPE assessment design in light of the more recent external pressure of AI offers a rethink of both <italic>how</italic> and <italic>what</italic> we are assessing to ensure the course is suited to develop critical-thinking, better-prepared educators.</p><p>The findings build on longstanding concerns that &#x201C;traditional&#x201D; assessment methods (eg, essays) within master&#x2019;s programs lack consistency and that alternative assessment methods are preferable to improve both motivation and fairness [<xref ref-type="bibr" rid="ref28">28</xref>]. However, as participants wanted to retain some written assessments to develop critical analysis through essay writing, perhaps we can use AI&#x2019;s output to develop those skills [<xref ref-type="bibr" rid="ref8">8</xref>]. Education does students a disservice if new technologies, such as AI, are not embraced. 
Creating authentic, student-centered assessments will not only cultivate critical thinking, encourage learning, and decrease academic misconduct but also better prepare learners for future practice if they reflect genuine tasks undertaken by the HPE community [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref30">30</xref>]. AI also influences clinical practice, improving diagnostic efficiency and accuracy [<xref ref-type="bibr" rid="ref15">15</xref>], so it is disingenuous not to use it in educating future educators.</p><p>Incorporating AI into assessment provides an opportunity to develop creative assessments that appeal to students&#x2019; different strengths and increase engagement while decreasing overreliance on AI to write the assessment. Interestingly, participants felt that current GenAI programs would not be able to write an entire essay sufficiently critical to successfully pass an HPE assessment. However, using targeted follow-up prompts, ChatGPT has been shown to produce a reasonable academic piece of work within 2 to 3 hours [<xref ref-type="bibr" rid="ref31">31</xref>], and despite some inaccuracies, this is much quicker than writing the essay by hand and requires minimal expertise or effort.</p><p>AI has inevitably influenced HPE assessments, forcing educators and learners to decide whether to embrace it or ignore reality. The findings corroborate current literature [<xref ref-type="bibr" rid="ref32">32</xref>] in demonstrating how most participants were already embracing AI, suggesting ways it can be incorporated into assessment creation, design, implementation, and marking, with an understanding that in the future, AI and assessment will be inextricably linked. AI use, especially for repetitive or onerous tasks, may decrease the &#x201C;cognitive load&#x201D; and create time and space for more creativity within assessments. 
Such &#x201C;cognitive offloading&#x201D; is described by Risko and Gilbert [<xref ref-type="bibr" rid="ref33">33</xref>] as the process whereby a physical action (such as using an LLM) can &#x201C;reduce the cognitive demands of a task&#x201D; [<xref ref-type="bibr" rid="ref33">33</xref>].</p><p>The intended learning outcomes of the HPE courses should be reconsidered to develop educators who will thrive in the era of learning with and from AI and, therefore, incorporate AI in assessments to constructively align with such teaching and learning [<xref ref-type="bibr" rid="ref8">8</xref>]. However, overreliance on AI may be detrimental to fundamental learning [<xref ref-type="bibr" rid="ref30">30</xref>], so educators need to carefully consider how AI applications best align with the intended learning outcomes.</p><p>Findings that the &#x201C;personal touch&#x201D; from educators was strongly valued by learners reflect similar research, which showed how online learning, specifically online assessments, that overrely on technology and lack human presence are &#x201C;dehumanized&#x201D; and detrimental to student learning [<xref ref-type="bibr" rid="ref34">34</xref>]. Human connection has been demonstrated as the most important factor in engagement and student learning [<xref ref-type="bibr" rid="ref35">35</xref>], and the collaboration between educators and AI in assessment design and delivery may not only promote community belonging for learners but also enhance engagement in both assessment and learning [<xref ref-type="bibr" rid="ref34">34</xref>]. This supports findings that AI and educators must coexist to facilitate learning.</p></sec><sec id="s4-1-2"><title>Institutional Policy Recommendations</title><p>The uncertainty surrounding AI use and plagiarism reflects current thinking that AI use requires a full reconsideration of what plagiarism now represents [<xref ref-type="bibr" rid="ref26">26</xref>]. 
Perkins [<xref ref-type="bibr" rid="ref26">26</xref>] suggests that clear acknowledgment of AI use by learners should not be considered academic misconduct by institutions, instead emphasizing that learning outcomes may not be met if AI generates such assessments. AI hesitancy in this study may reflect that health care professionals must hold honesty and integrity central to their vocation, and academic misconduct can be referred to their governing body (for doctors, the General Medical Council), inciting fear of accidental misuse. AI will likely always be one step ahead, and the deficiencies in AI detection tools make detection like a game of cat and mouse (described by [<xref ref-type="bibr" rid="ref36">36</xref>]). Furthermore, relying on educators detecting AI via methods such as identifying hallucinated references is highly subjective due to educators&#x2019; differentials and may exacerbate marking inequalities. Efforts should be redirected into collaborating and incorporating AI into assessment design and content, encouraging assessment as and for learning.</p><p>Both educators and learners wanted to increase their AI literacy and recognized an opportunity for increased learning, reflecting wider literature [<xref ref-type="bibr" rid="ref32">32</xref>], but acknowledged the complexity of this task and potential resistance to change at an institutional level. AI inequality in learners can be addressed by university access and increased AI literacy. Participants wanted the direction to come from their institutions; however, a study showed that in times of change (to online assessment), new assessors sought support and guidance from more informal networks within the communities of practice (eg, colleagues in similar situations) as opposed to formal networks (eg, university experts) [<xref ref-type="bibr" rid="ref37">37</xref>]. 
Therefore, creating assessor and learner CoPs to discuss and share AI practice could develop understanding and confidence with AI, with such support systems in new online learning environments helping to foster connection and engagement [<xref ref-type="bibr" rid="ref35">35</xref>].</p></sec><sec id="s4-1-3"><title>Implications for Educator Training</title><p>There are multiple resources from the online community on how to use GenAI (such as ChatGPT) within HPE; however, these assume a certain level of digital literacy of their readers (eg, [<xref ref-type="bibr" rid="ref38">38</xref>]). An informal assessor CoP, such as between colleagues, is an easy and effective starting point to discuss and share practice, developing the AI literacy and confidence that all participants desired, and such communities have demonstrated success in facilitating change within HPE teaching practices [<xref ref-type="bibr" rid="ref39">39</xref>].</p><p>AI is a phenomenon that the assessor community must respond to, with institutional guidance. In a CoP, learners usually learn from the &#x201C;masters&#x201D; in the community; however, this may be flipped for AI, as it seems to be the masters dragging their feet. A CoP can facilitate successful institutional-level change in HPE, though this requires institutional appetite for change and keen faculty who are supported with time and resources to reflect on experiential learning [<xref ref-type="bibr" rid="ref40">40</xref>]. 
Current literature describes approaches to developing faculty AI literacy, improving educator confidence and ability to integrate AI into the curricula [<xref ref-type="bibr" rid="ref41">41</xref>], alongside the importance of collaboration and coproduction of knowledge between stakeholders (including AI specialists) to ensure current and evolving AI literacy and application [<xref ref-type="bibr" rid="ref42">42</xref>].</p><p>Early AI adopters may need to drive such an assessor CoP to develop a critical mass for AI to become widely accepted and to establish acceptable uses for learning. The assessor community needs to work more widely with colleagues who are at the forefront of or more comfortable with AI to drive change. This will represent interdepartmental, or even inter-university, cross-collaboration [<xref ref-type="bibr" rid="ref43">43</xref>].</p></sec></sec><sec id="s4-2"><title>Actionable Strategies in HPE to Guide Assessment Redesign</title><p>The following are the actionable strategies in HPE to guide assessment redesign:</p><list list-type="bullet"><list-item><p>&#x201C;Humanizing&#x201D; AI assessments involves integrating AI into assessment format and content, while ensuring the human component remains. This would likely improve both educator and learner experience, while deterring from full reliance on AI use.</p></list-item><list-item><p>Institutions should hold AI literacy workshops to increase AI literacy and access, as workshops and teaching for both educators and learners will further drive collaboration and successful CoPs.</p></list-item><list-item><p>Raise educator awareness and understanding of the ethical issues around AI and how these can impact learner acceptance of AI. There is a range of ethical issues, such as environmental cost, bias, data protection, digital inequalities, etc, that many educators are either unaware of or feel are the institutional decisions to be made. 
Learners have their own thoughts on these ethical issues, which impact their willingness and acceptance of AI use.</p></list-item><list-item><p>Establish or revisit assessor CoPs (driven by early AI adopters) to encourage sharing of AI knowledge, which will likely require institutional buy-in. Such CoPs may challenge the status quo, with &#x201C;masters&#x201D; learning from &#x201C;novices,&#x201D; and early AI adopters working collaboratively with assessment experts to improve and produce authentic HPE assessments.</p></list-item><list-item><p>Retain the essay as one of a multitude of assessments, with careful essay design to use AI for &#x201C;cognitive offloading&#x201D; (for educators and assessors) and enable higher-order critical thinking. AI has the potential to be an effective tool within learning and assessment, and some approaches can help improve AI&#x2019;s output: improving prompts, critically reviewing output for accuracy, and checking references.</p></list-item></list></sec><sec id="s4-3"><title>Future Research Needs</title><p>The following are the future research needs:</p><list list-type="bullet"><list-item><p>Consider how GenAI is influencing postgraduate HPE assessments outside the United Kingdom</p></list-item><list-item><p>Consider how GenAI is influencing HPE assessments with perspectives from educators and learners across different health professions</p></list-item></list></sec><sec id="s4-4"><title>Limitations</title><p>There was potential educator and learner response bias among those who felt more confident discussing AI. The research was specific to postgraduate HPE, so the opinions of undergraduates and students from different courses may differ. Interviews were conducted by a single interviewer (CP) as part of her master's thesis, and despite efforts at reflexivity, there may have been interviewer bias. 
The study is based on 8 interviews from institutions across the United Kingdom, predominantly doctors, so generalizability to different countries and courses may be limited. Furthermore, it is difficult to extrapolate group comparison due to the role duality of learner-educators, as well as the small sample size of only 4 participants per group. While it was not possible to obtain the assessment format from all HPE courses, as participants were from different HPE courses, they were able to provide an overview of various formats.</p></sec><sec id="s4-5"><title>Conclusion</title><p>Key findings indicate that educator and learner perspectives agree on the opportunities and challenges presented by GenAI: AI is here, students are at a disservice if we do not embrace it, AI as an opportunity to rethink HPE assessments, AI is a gray area, and AI is fallible. Participants consider AI to offer the opportunity for an overdue reevaluation of predominantly written-based HPE assessments to better reflect real clinical and academic life and develop educators who are going to enter the modern workforce in which AI is ever present. Developing an assessor CoP will allow for shared AI practice and knowledge cocreation, building both educators&#x2019; and learners&#x2019; confidence in this new field.</p></sec></sec></body><back><ack><p>The authors would also like to thank participants for taking part in interviews. They presented this research at ASME 2025 and thank the audience for their questions and input. They would like to thank the editor and reviewers of <italic>JMIR Formative Research</italic> for their constructive comments. Generative artificial intelligence was not used in the generation of the manuscript.</p></ack><notes><sec><title>Funding</title><p>No funding was received. 
This was completed as CP&#x2019;s master's thesis.</p></sec><sec><title>Data Availability</title><p>Some datasets used and analyzed during this study are available from the corresponding author on reasonable request. The transcripts from the interviews were deleted on conclusion of the project in line with ethics approval. Datasets available are coded extracts and deidentified quotations.</p></sec></notes><fn-group><fn fn-type="con"><p>CP undertook the primary research as her thesis for a master's in medical education, under the supervision of master&#x2019;s supervisor DH. CP developed the concept alongside discussion and development with DH. CP designed the interview schedule, conducted interviews, analyzed the data, wrote the original manuscript, and edited or reviewed the final manuscript. DH supervised the project, codeveloped the concept with CP, reviewed the interview schedule and methodology, and reviewed and edited the final manuscript.</p></fn><fn fn-type="conflict"><p>CP was a master's student at University College London, Royal College of Physicians in Medical Education. 
DH is a senior educationalist at the Royal College of Physicians.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">CoP</term><def><p>communities of practice</p></def></def-item><def-item><term id="abb3">COREQ</term><def><p>Consolidated Criteria for Reporting Qualitative Research</p></def></def-item><def-item><term id="abb4">GenAI</term><def><p>generative artificial intelligence</p></def></def-item><def-item><term id="abb5">HPE</term><def><p>health professions education</p></def></def-item><def-item><term id="abb6">LLM</term><def><p>large language model</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="report"><article-title>NHS long term workforce plan</article-title><year>2023</year><access-date>2025-09-25</access-date><publisher-name>NHS England</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://www.england.nhs.uk/wp-content/uploads/2023/06/nhs-long-term-workforce-plan-v1.2.pdf">https://www.england.nhs.uk/wp-content/uploads/2023/06/nhs-long-term-workforce-plan-v1.2.pdf</ext-link></comment></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="web"><article-title>Educator workforce strategy</article-title><source>NHS England</source><year>2023</year><access-date>2025-09-26</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.hee.nhs.uk/sites/default/files/EducatorWorkforceStrategy.pdf">https://www.hee.nhs.uk/sites/default/files/EducatorWorkforceStrategy.pdf</ext-link></comment></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="report"><article-title>Fit for the future: 10 year health plan for England</article-title><year>2025</year><access-date>2026-04-15</access-date><publisher-name>NHS England</publisher-name><comment><ext-link ext-link-type="uri" 
xlink:href="https://assets.publishing.service.gov.uk/media/6888a0b1a11f859994409147/fit-for-the-future-10-year-health-plan-for-england.pdf">https://assets.publishing.service.gov.uk/media/6888a0b1a11f859994409147/fit-for-the-future-10-year-health-plan-for-england.pdf</ext-link></comment></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Clusmann</surname><given-names>J</given-names> </name><name name-style="western"><surname>Kolbinger</surname><given-names>FR</given-names> </name><name name-style="western"><surname>Muti</surname><given-names>HS</given-names> </name><etal/></person-group><article-title>The future landscape of large language models in medicine</article-title><source>Commun Med (Lond)</source><year>2023</year><month>10</month><day>10</day><volume>3</volume><issue>1</issue><fpage>141</fpage><pub-id pub-id-type="doi">10.1038/s43856-023-00370-1</pub-id><pub-id pub-id-type="medline">37816837</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cotton</surname><given-names>DRE</given-names> </name><name name-style="western"><surname>Cotton</surname><given-names>PA</given-names> </name><name name-style="western"><surname>Shipway</surname><given-names>JR</given-names> </name></person-group><article-title>Chatting and cheating: ensuring academic integrity in the era of ChatGPT</article-title><source>Innov Educ Teach Int</source><year>2023</year><volume>61</volume><issue>2</issue><fpage>228</fpage><lpage>239</lpage><pub-id pub-id-type="doi">10.1080/14703297.2023.2190148</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Brew</surname><given-names>M</given-names> </name><name 
name-style="western"><surname>Taylor</surname><given-names>S</given-names> </name><name name-style="western"><surname>Lam</surname><given-names>R</given-names> </name><name name-style="western"><surname>Havemann</surname><given-names>L</given-names> </name><name name-style="western"><surname>Nerantzi</surname><given-names>C</given-names> </name></person-group><article-title>Towards developing AI literacy: three student provocations on AI in higher education</article-title><source>Asian J Distance Educ</source><year>2023</year><volume>18</volume><issue>2</issue><fpage>1</fpage><lpage>11</lpage><pub-id pub-id-type="doi">10.5281/zenodo.8032387</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Stokel-Walker</surname><given-names>C</given-names> </name></person-group><article-title>AI bot ChatGPT writes smart essays - should professors worry?</article-title><source>Nature</source><year>2022</year><month>12</month><day>9</day><pub-id pub-id-type="doi">10.1038/d41586-022-04397-7</pub-id><pub-id pub-id-type="medline">36494443</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Mollick</surname><given-names>ER</given-names> </name><name name-style="western"><surname>Mollick</surname><given-names>L</given-names> </name></person-group><article-title>Assigning AI: seven approaches for students, with prompts</article-title><source>SSRN</source><comment>Preprint posted online on  Jun 21, 2023</comment><pub-id pub-id-type="doi">10.2139/ssrn.4475995</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dwivedi</surname><given-names>YK</given-names> </name><name 
name-style="western"><surname>Kshetri</surname><given-names>N</given-names> </name><name name-style="western"><surname>Hughes</surname><given-names>L</given-names> </name><etal/></person-group><article-title>Opinion Paper: &#x201C;So what if ChatGPT wrote it?&#x201D; Multidisciplinary perspectives on opportunities, challenges and implications of generative conversational AI for research, practice and policy</article-title><source>Int J Inf Manage</source><year>2023</year><month>08</month><volume>71</volume><fpage>102642</fpage><pub-id pub-id-type="doi">10.1016/j.ijinfomgt.2023.102642</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Han</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Battaglia</surname><given-names>F</given-names> </name><name name-style="western"><surname>Udaiyar</surname><given-names>A</given-names> </name><name name-style="western"><surname>Fooks</surname><given-names>A</given-names> </name><name name-style="western"><surname>Terlecky</surname><given-names>SR</given-names> </name></person-group><article-title>An explorative assessment of ChatGPT as an aid in medical education: use it with caution</article-title><source>Med Teach</source><year>2024</year><month>05</month><volume>46</volume><issue>5</issue><fpage>657</fpage><lpage>664</lpage><pub-id pub-id-type="doi">10.1080/0142159X.2023.2271159</pub-id><pub-id pub-id-type="medline">37862566</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cheung</surname><given-names>BHH</given-names> </name><name name-style="western"><surname>Lau</surname><given-names>GKK</given-names> </name><name name-style="western"><surname>Wong</surname><given-names>GTC</given-names> </name><etal/></person-group><article-title>ChatGPT 
versus human in generating medical graduate exam multiple choice questions-a multinational prospective study (Hong Kong S.A.R., Singapore, Ireland, and the United Kingdom)</article-title><source>PLoS One</source><year>2023</year><volume>18</volume><issue>8</issue><fpage>e0290691</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0290691</pub-id><pub-id pub-id-type="medline">37643186</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gao</surname><given-names>CA</given-names> </name><name name-style="western"><surname>Howard</surname><given-names>FM</given-names> </name><name name-style="western"><surname>Markov</surname><given-names>NS</given-names> </name><etal/></person-group><article-title>Comparing scientific abstracts generated by ChatGPT to real abstracts with detectors and blinded human reviewers</article-title><source>NPJ Digit Med</source><year>2023</year><month>04</month><day>26</day><volume>6</volume><issue>1</issue><fpage>75</fpage><pub-id pub-id-type="doi">10.1038/s41746-023-00819-6</pub-id><pub-id pub-id-type="medline">37100871</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Laupichler</surname><given-names>MC</given-names> </name><name name-style="western"><surname>Aster</surname><given-names>A</given-names> </name><name name-style="western"><surname>Schirch</surname><given-names>J</given-names> </name><name name-style="western"><surname>Raupach</surname><given-names>T</given-names> </name></person-group><article-title>Artificial intelligence literacy in higher and adult education: a scoping literature review</article-title><source>Comput Educ Artif Intell</source><year>2022</year><volume>3</volume><fpage>100101</fpage><pub-id 
pub-id-type="doi">10.1016/j.caeai.2022.100101</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Issa</surname><given-names>WB</given-names> </name><name name-style="western"><surname>Shorbagi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Al-Sharman</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Shaping the future: perspectives on the Integration of Artificial Intelligence in health profession education: a multi-country survey</article-title><source>BMC Med Educ</source><year>2024</year><month>10</month><day>18</day><volume>24</volume><issue>1</issue><fpage>1166</fpage><pub-id pub-id-type="doi">10.1186/s12909-024-06076-9</pub-id><pub-id pub-id-type="medline">39425151</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sun</surname><given-names>L</given-names> </name><name name-style="western"><surname>Yin</surname><given-names>C</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Zhao</surname><given-names>W</given-names> </name></person-group><article-title>Artificial intelligence for healthcare and medical education: a systematic review</article-title><source>Am J Transl Res</source><year>2023</year><volume>15</volume><issue>7</issue><fpage>4820</fpage><lpage>4828</lpage><pub-id pub-id-type="medline">37560249</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dumi&#x0107;-&#x010C;ule</surname><given-names>I</given-names> </name><name name-style="western"><surname>Ore&#x0161;kovi&#x0107;</surname><given-names>T</given-names> </name><name 
name-style="western"><surname>Brklja&#x010D;i&#x0107;</surname><given-names>B</given-names> </name><name name-style="western"><surname>Kujund&#x017E;i&#x0107; Tiljak</surname><given-names>M</given-names> </name><name name-style="western"><surname>Ore&#x0161;kovi&#x0107;</surname><given-names>S</given-names> </name></person-group><article-title>The importance of introducing artificial intelligence to the medical curriculum - assessing practitioners&#x2019; perspectives</article-title><source>Croat Med J</source><year>2020</year><month>10</month><day>31</day><volume>61</volume><issue>5</issue><fpage>457</fpage><lpage>464</lpage><pub-id pub-id-type="doi">10.3325/cmj.2020.61.457</pub-id><pub-id pub-id-type="medline">33150764</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pinto Dos Santos</surname><given-names>D</given-names> </name><name name-style="western"><surname>Giese</surname><given-names>D</given-names> </name><name name-style="western"><surname>Brodehl</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Medical students&#x2019; attitude towards artificial intelligence: a multicentre survey</article-title><source>Eur Radiol</source><year>2019</year><month>04</month><volume>29</volume><issue>4</issue><fpage>1640</fpage><lpage>1646</lpage><pub-id pub-id-type="doi">10.1007/s00330-018-5601-1</pub-id><pub-id pub-id-type="medline">29980928</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Boscardin</surname><given-names>CK</given-names> </name><name name-style="western"><surname>Gin</surname><given-names>B</given-names> </name><name name-style="western"><surname>Golde</surname><given-names>PB</given-names> </name><name 
name-style="western"><surname>Hauer</surname><given-names>KE</given-names> </name></person-group><article-title>ChatGPT and generative artificial intelligence for medical education: potential impact and opportunity</article-title><source>Acad Med</source><year>2024</year><month>01</month><day>1</day><volume>99</volume><issue>1</issue><fpage>22</fpage><lpage>27</lpage><pub-id pub-id-type="doi">10.1097/ACM.0000000000005439</pub-id><pub-id pub-id-type="medline">37651677</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tlili</surname><given-names>A</given-names> </name><name name-style="western"><surname>Shehata</surname><given-names>B</given-names> </name><name name-style="western"><surname>Adarkwah</surname><given-names>MA</given-names> </name><etal/></person-group><article-title>What if the devil is my guardian angel: ChatGPT as a case study of using chatbots in education</article-title><source>Smart Learn Environ</source><year>2023</year><volume>10</volume><issue>1</issue><pub-id pub-id-type="doi">10.1186/s40561-023-00237-x</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cross</surname><given-names>J</given-names> </name><name name-style="western"><surname>Robinson</surname><given-names>R</given-names> </name><name name-style="western"><surname>Devaraju</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Transforming medical education: assessing the integration of ChatGPT into faculty workflows at a Caribbean medical school</article-title><source>Cureus</source><year>2023</year><month>07</month><volume>15</volume><issue>7</issue><fpage>e41399</fpage><pub-id pub-id-type="doi">10.7759/cureus.41399</pub-id><pub-id pub-id-type="medline">37426402</pub-id></nlm-citation></ref><ref 
id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ooi</surname><given-names>KB</given-names> </name><name name-style="western"><surname>Tan</surname><given-names>GWH</given-names> </name><name name-style="western"><surname>Al-Emran</surname><given-names>M</given-names> </name><etal/></person-group><article-title>The potential of generative artificial intelligence across disciplines: perspectives and future directions</article-title><source>J Comput Inf Syst</source><year>2025</year><month>01</month><day>2</day><volume>65</volume><issue>1</issue><fpage>76</fpage><lpage>107</lpage><pub-id pub-id-type="doi">10.1080/08874417.2023.2261010</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="report"><person-group person-group-type="author"><name name-style="western"><surname>Wenger-Trayner</surname><given-names>E</given-names> </name><name name-style="western"><surname>Wenger-Trayner</surname><given-names>B</given-names> </name></person-group><article-title>An introduction to communities of practice: a brief overview of the concept and its uses</article-title><year>2015</year><access-date>2024-02-01</access-date><publisher-name>Social Learning Lab</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://www.wenger-trayner.com/wp-content/uploads/2022/06/15-06-Brief-introduction-to-communities-of-practice.pdf">https://www.wenger-trayner.com/wp-content/uploads/2022/06/15-06-Brief-introduction-to-communities-of-practice.pdf</ext-link></comment></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tong</surname><given-names>A</given-names> </name><name name-style="western"><surname>Sainsbury</surname><given-names>P</given-names> </name><name name-style="western"><surname>Craig</surname><given-names>J</given-names> 
</name></person-group><article-title>Consolidated criteria for reporting qualitative research (COREQ): a 32-item checklist for interviews and focus groups</article-title><source>Int J Qual Health Care</source><year>2007</year><month>12</month><volume>19</volume><issue>6</issue><fpage>349</fpage><lpage>357</lpage><pub-id pub-id-type="doi">10.1093/intqhc/mzm042</pub-id><pub-id pub-id-type="medline">17872937</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Braun</surname><given-names>V</given-names> </name><name name-style="western"><surname>Clarke</surname><given-names>V</given-names> </name></person-group><article-title>Using thematic analysis in psychology</article-title><source>Qual Res Psychol</source><year>2006</year><month>01</month><volume>3</volume><issue>2</issue><fpage>77</fpage><lpage>101</lpage><pub-id pub-id-type="doi">10.1191/1478088706qp063oa</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Preiksaitis</surname><given-names>C</given-names> </name><name name-style="western"><surname>Rose</surname><given-names>C</given-names> </name></person-group><article-title>Opportunities, challenges, and future directions of generative artificial intelligence in medical education: scoping review</article-title><source>JMIR Med Educ</source><year>2023</year><month>10</month><day>20</day><volume>9</volume><fpage>e48785</fpage><pub-id pub-id-type="doi">10.2196/48785</pub-id><pub-id pub-id-type="medline">37862079</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Perkins</surname><given-names>M</given-names> </name></person-group><article-title>Academic integrity considerations of AI large 
language models in the post-pandemic era: ChatGPT and beyond</article-title><source>J Univ Teach Learn Pract</source><year>2023</year><volume>20</volume><issue>2</issue><pub-id pub-id-type="doi">10.53761/1.20.02.07</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wartman</surname><given-names>SA</given-names> </name><name name-style="western"><surname>Combs</surname><given-names>CD</given-names> </name></person-group><article-title>Reimagining medical education in the age of AI</article-title><source>AMA J Ethics</source><year>2019</year><month>02</month><day>1</day><volume>21</volume><issue>2</issue><fpage>E146</fpage><lpage>E152</lpage><pub-id pub-id-type="doi">10.1001/amajethics.2019.146</pub-id><pub-id pub-id-type="medline">30794124</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Deignan</surname><given-names>T</given-names> </name><name name-style="western"><surname>Brown</surname><given-names>S</given-names> </name></person-group><article-title>Educator perspectives on the use of alternative assessment methods within taught Masters programmes: an exploratory study using activity theory and Q methodology</article-title><source>Educ Rev</source><year>2016</year><volume>68</volume><issue>4</issue><fpage>379</fpage><lpage>402</lpage><pub-id pub-id-type="doi">10.1080/00131911.2015.1090399</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Swiecki</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Khosravi</surname><given-names>H</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>G</given-names> 
</name><etal/></person-group><article-title>Assessment in the age of artificial intelligence</article-title><source>Comput Educ Artif Intell</source><year>2022</year><volume>3</volume><fpage>100075</fpage><pub-id pub-id-type="doi">10.1016/j.caeai.2022.100075</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hsiao</surname><given-names>YP</given-names> </name><name name-style="western"><surname>Klijn</surname><given-names>N</given-names> </name><name name-style="western"><surname>Chiu</surname><given-names>MS</given-names> </name></person-group><article-title>Developing a framework to re-design writing assignment assessment for the era of Large Language Models</article-title><source>Learn Res Pract</source><year>2023</year><volume>9</volume><issue>2</issue><fpage>148</fpage><lpage>158</lpage><pub-id pub-id-type="doi">10.1080/23735082.2023.2257234</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hattingh</surname><given-names>S</given-names> </name><name name-style="western"><surname>Northcote</surname><given-names>M</given-names> </name></person-group><article-title>Personalising online assessments: a systematic literature review</article-title><source>J Furth High Educ</source><year>2023</year><volume>47</volume><issue>10</issue><fpage>1420</fpage><lpage>1436</lpage><pub-id pub-id-type="doi">10.1080/0309877X.2023.2250743</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rudolph</surname><given-names>J</given-names> </name><name name-style="western"><surname>Tan</surname><given-names>S</given-names> </name><name name-style="western"><surname>Tana</surname><given-names>S</given-names> 
</name></person-group><article-title>ChatGPT: bullshit spewer or the end of traditional assessments in higher education?</article-title><source>J Appl Learn Teach</source><year>2023</year><volume>6</volume><issue>1</issue><pub-id pub-id-type="doi">10.37074/jalt.2023.6.1.9</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Zhai</surname><given-names>X</given-names> </name></person-group><article-title>ChatGPT user experience: implications for education</article-title><source>SSRN</source><comment>Preprint posted online on  Dec 27, 2022</comment><pub-id pub-id-type="doi">10.2139/ssrn.4312418</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Harrison</surname><given-names>LM</given-names> </name><name name-style="western"><surname>Mathuews</surname><given-names>KB</given-names> </name></person-group><article-title>Three priorities for the future of online education</article-title><source>J Thought</source><year>2022</year><access-date>2026-04-15</access-date><volume>56</volume><issue>1/2</issue><fpage>3</fpage><lpage>16</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://www.jstor.org/stable/27184753">https://www.jstor.org/stable/27184753</ext-link></comment></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Risko</surname><given-names>EF</given-names> </name><name name-style="western"><surname>Gilbert</surname><given-names>SJ</given-names> </name></person-group><article-title>Cognitive offloading</article-title><source>Trends Cogn Sci</source><year>2016</year><month>09</month><volume>20</volume><issue>9</issue><fpage>676</fpage><lpage>688</lpage><pub-id 
pub-id-type="doi">10.1016/j.tics.2016.07.002</pub-id><pub-id pub-id-type="medline">27542527</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kramm</surname><given-names>N</given-names> </name><name name-style="western"><surname>McKenna</surname><given-names>S</given-names> </name></person-group><article-title>AI amplifies the tough question: what is higher education really for?</article-title><source>Teach High Educ</source><year>2023</year><volume>28</volume><issue>8</issue><fpage>2173</fpage><lpage>2178</lpage><pub-id pub-id-type="doi">10.1080/13562517.2023.2263839</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Byrne</surname><given-names>VL</given-names> </name><name name-style="western"><surname>Hogan</surname><given-names>E</given-names> </name><name name-style="western"><surname>Dhingra</surname><given-names>N</given-names> </name><name name-style="western"><surname>Anthony</surname><given-names>M</given-names> </name><name name-style="western"><surname>Gannon</surname><given-names>C</given-names> </name></person-group><article-title>An exploratory study of how novice instructors pivot to online assessments strategies</article-title><source>Distance Educ</source><year>2021</year><volume>42</volume><issue>2</issue><fpage>184</fpage><lpage>199</lpage><pub-id pub-id-type="doi">10.1080/01587919.2021.1911624</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Masters</surname><given-names>K</given-names> </name><name name-style="western"><surname>Benjamin</surname><given-names>J</given-names> </name><name name-style="western"><surname>Agrawal</surname><given-names>A</given-names> 
</name><name name-style="western"><surname>MacNeill</surname><given-names>H</given-names> </name><name name-style="western"><surname>Pillow</surname><given-names>MT</given-names> </name><name name-style="western"><surname>Mehta</surname><given-names>N</given-names> </name></person-group><article-title>Twelve tips on creating and using custom GPTs to enhance health professions education</article-title><source>Med Teach</source><year>2024</year><month>06</month><volume>46</volume><issue>6</issue><fpage>752</fpage><lpage>756</lpage><pub-id pub-id-type="doi">10.1080/0142159X.2024.2305365</pub-id><pub-id pub-id-type="medline">38285894</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tax</surname><given-names>CL</given-names> </name><name name-style="western"><surname>Doucette</surname><given-names>H</given-names> </name><name name-style="western"><surname>Neish</surname><given-names>NR</given-names> </name><name name-style="western"><surname>Maillet</surname><given-names>JP</given-names> </name></person-group><article-title>A model for cultivating dental hygiene faculty development within a community of practice</article-title><source>J Dent Educ</source><year>2012</year><month>03</month><volume>76</volume><issue>3</issue><fpage>311</fpage><lpage>321</lpage><pub-id pub-id-type="doi">10.1002/j.0022-0337.2012.76.3.tb05260.x</pub-id><pub-id pub-id-type="medline">22383599</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jenkins</surname><given-names>G</given-names> </name><name name-style="western"><surname>Palermo</surname><given-names>C</given-names> </name><name name-style="western"><surname>Clark</surname><given-names>AM</given-names> </name><name 
name-style="western"><surname>Costello</surname><given-names>L</given-names> </name></person-group><article-title>Communities of practice to facilitate change in health professions education: a realist synthesis</article-title><source>Nurse Educ Today</source><year>2024</year><month>03</month><volume>134</volume><fpage>106091</fpage><pub-id pub-id-type="doi">10.1016/j.nedt.2024.106091</pub-id><pub-id pub-id-type="medline">38241962</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ahsan</surname><given-names>Z</given-names> </name></person-group><article-title>Integrating artificial intelligence into medical education: a narrative systematic review of current applications, challenges, and future directions</article-title><source>BMC Med Educ</source><year>2025</year><month>08</month><day>23</day><volume>25</volume><issue>1</issue><fpage>1187</fpage><pub-id pub-id-type="doi">10.1186/s12909-025-07744-0</pub-id><pub-id pub-id-type="medline">40849650</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gin</surname><given-names>BC</given-names> </name><name name-style="western"><surname>LaForge</surname><given-names>K</given-names> </name><name name-style="western"><surname>Burk-Rafel</surname><given-names>J</given-names> </name><name name-style="western"><surname>Boscardin</surname><given-names>CK</given-names> </name></person-group><article-title>Macy Foundation Innovation Report Part II: From hype to reality: innovators&#x2019; visions for navigating AI integration challenges in medical education</article-title><source>Acad Med</source><year>2025</year><month>09</month><day>1</day><volume>100</volume><issue>9S Suppl 1</issue><fpage>S22</fpage><lpage>S29</lpage><pub-id 
pub-id-type="doi">10.1097/ACM.0000000000006117</pub-id><pub-id pub-id-type="medline">40479503</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Ma</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Song</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Balch</surname><given-names>JA</given-names> </name><name name-style="western"><surname>Ren</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Vellanki</surname><given-names>D</given-names> </name><name name-style="western"><surname>Hu</surname><given-names>Z</given-names> </name><etal/></person-group><article-title>Promoting AI competencies for medical students: a scoping review on frameworks, programs, and tools</article-title><source>arXiv</source><comment>Preprint posted online on  Jul 10, 2024</comment><pub-id pub-id-type="doi">10.48550/arXiv.2407.18939</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Interview schedule.</p><media xlink:href="formative_v10i1e87121_app1.docx" xlink:title="DOCX File, 21 KB"/></supplementary-material><supplementary-material id="app2"><label>Checklist 1</label><p>COREQ checklist.</p><media xlink:href="formative_v10i1e87121_app2.pdf" xlink:title="PDF File, 652 KB"/></supplementary-material></app-group></back></article>