<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">Interact J Med Res</journal-id><journal-id journal-id-type="publisher-id">i-jmr</journal-id><journal-id journal-id-type="index">3</journal-id><journal-title>Interactive Journal of Medical Research</journal-title><abbrev-journal-title>Interact J Med Res</abbrev-journal-title><issn pub-type="epub">1929-073X</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v15i1e85266</article-id><article-id pub-id-type="doi">10.2196/85266</article-id><article-categories><subj-group subj-group-type="heading"><subject>Tutorial</subject></subj-group></article-categories><title-group><article-title>An Introduction to AI for Clinicians: Tutorial</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Lee</surname><given-names>Stephen B</given-names></name><degrees>MS, MD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Carter</surname><given-names>Alexis B</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Haider</surname><given-names>Muhammad Hamis</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Ko</surname><given-names>Seok-Bum</given-names></name><degrees>PhD</degrees><xref ref-type="aff" 
rid="aff3">3</xref></contrib></contrib-group><aff id="aff1"><institution>Division of Infectious Diseases, University of Saskatchewan</institution><addr-line>1440-14th Avenue, Regina General Hospital, 2nd Floor Medical Office Wing, ID Clinic</addr-line><addr-line>Regina</addr-line><addr-line>SK</addr-line><country>Canada</country></aff><aff id="aff2"><institution>Department of Pathology and Laboratory Medicine, Emory University</institution><addr-line>Atlanta</addr-line><addr-line>GA</addr-line><country>United States</country></aff><aff id="aff3"><institution>Department of Electrical and Computer Engineering, University of Saskatchewan</institution><addr-line>Saskatoon</addr-line><addr-line>SK</addr-line><country>Canada</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Schwartz</surname><given-names>Amy</given-names></name></contrib><contrib contrib-type="editor"><name name-style="western"><surname>Balcarras</surname><given-names>Matthew</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Fukuzawa</surname><given-names>Fumitoshi</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Kumar</surname><given-names>Rahul R</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Stephen B Lee, MS, MD, Division of Infectious Diseases, University of Saskatchewan, 1440-14th Avenue, Regina General Hospital, 2nd Floor Medical Office Wing, ID Clinic, Regina, SK, S4P 0W5, Canada, 1 3067664247; <email>leestephenz@gmail.com</email></corresp></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date pub-type="epub"><day>30</day><month>3</month><year>2026</year></pub-date><volume>15</volume><elocation-id>e85266</elocation-id><history><date date-type="received"><day>04</day><month>10</month><year>2025</year></date><date 
date-type="rev-recd"><day>05</day><month>02</month><year>2026</year></date><date date-type="accepted"><day>06</day><month>02</month><year>2026</year></date></history><copyright-statement>&#x00A9; Stephen B Lee, Alexis B Carter, Muhammad Hamis Haider, Seok-Bum Ko. Originally published in the Interactive Journal of Medical Research (<ext-link ext-link-type="uri" xlink:href="https://www.i-jmr.org/">https://www.i-jmr.org/</ext-link>), 30.3.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in the Interactive Journal of Medical Research, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://www.i-jmr.org/">https://www.i-jmr.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://www.i-jmr.org/2026/1/e85266"/><abstract><p>Artificial intelligence (AI) is already fundamentally changing society, with medicine being no exception. AI will impact how we practice, how hospitals operate, and even the practice of medicine itself. The use of AI-based products has already begun, with examples including AI scribes and large language models such as ChatGPT. Work is ongoing to produce models that have specific functions within medicine, such as kidney injury prediction. However, transformative foundational work, such as AlphaFold (for protein structure prediction), also promises to completely change the way we approach medicine. 
Therefore, clinicians must develop a clear understanding of AI, not as an optional skill, but as a core competency of modern medical practice. This paper serves as a tutorial to guide medical professionals through the basic principles of AI. It will teach clinicians how to build a mental scaffold to understand and springboard into AI. The core parts of this paper are organized in steps, with additional relevant topics addressed in modules at the end of the paper. The core steps are meant to be read sequentially. To prepare the reader for the rest of the paper, this tutorial will first introduce what AI is and then cover some basic definitions needed to understand other concepts. The reader will then be ready to understand what deep learning is and the difference between supervised and unsupervised learning. Finally, the reader will go through how deep learning models learn. Separate modules on safety and clinical applications are also included. This tutorial is relevant to clinicians at all levels but may be particularly useful for practicing clinicians who are encountering AI tools integrated into their practices without previous formal education in the field. Users of this tutorial can refer to specific sections or read the entire paper.</p></abstract><kwd-group><kwd>medical education</kwd><kwd>machine learning</kwd><kwd>artificial intelligence</kwd><kwd>AI</kwd><kwd>deep learning</kwd></kwd-group></article-meta></front><body><sec id="s1"><title>Step 1: Understanding What Artificial Intelligence Is</title><p>Before delving into the concepts underlying artificial intelligence (AI), it is important to understand what AI means. AI is a broad term that generally refers to computer systems that can perform complex tasks historically associated with humans, such as human learning, comprehension, problem-solving, decision-making, creativity, and autonomy [<xref ref-type="bibr" rid="ref1">1</xref>]. In computer science, AI algorithms encompass a variety of methodologies. 
Most AI in the modern era is machine learning (ML) and even more specifically, deep learning (DL; <xref ref-type="fig" rid="figure1">Figure 1</xref>). While all ML is considered AI, not all AI is ML.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Venn diagram of artificial intelligence terminology.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="i-jmr_v15i1e85266_fig01.png"/></fig><p>Expert systems are an older paradigm of AI in which a subject matter expert&#x2019;s knowledge is hardcoded into intricate rule-based algorithms to simulate human decision-making (eg, &#x201C;if x condition, then y result&#x201D;). Well-known examples include the MYCIN system, designed by Edward Shortliffe in 1974 to predict antibiotic choice [<xref ref-type="bibr" rid="ref2">2</xref>], and traditional chess AI systems. Even famous examples such as Deep Blue (IBM Corp) relied on hardcoded knowledge [<xref ref-type="bibr" rid="ref3">3</xref>]. Expert systems and these hardcoded methodologies have been largely abandoned in the modern era due to the incredible effort required to develop them and the brittleness and inflexibility of the resultant systems. Limited examples of expert systems also exist in health care, such as older sepsis and drug interaction clinical decision support systems. AI is primarily ML currently. Rather than relying on hardcoding from experts, ML algorithms are trained to learn relationships and patterns in data. The terms DL and ML are often used interchangeably; however, it is worthwhile mentioning that DL is really a subset of ML. There are many types of ML that are not DL (eg, k-means clustering and decision trees). DL is discussed in depth in a following section.</p></sec><sec id="s2"><title>Step 2: Understanding Basic Definitions</title><p>Some level-setting is required to understand ML algorithms and models. 
ML <italic>algorithms</italic> are tools (eg, logistic regression) that are trained on data to create an ML <italic>model</italic>. ML models that are used for classification are frequently referred to as <italic>classifiers</italic>. The quality of the data used to train an algorithm is critical to the performance of the model. Each record or event in the dataset is referred to as an <italic>instance</italic>. Each aspect of an instance (eg, color, duration, and test result) used to train a model is known as a <italic>feature</italic>. In a simple set of data, instances would be rows in your data spreadsheet, while features would be the column headers. Many ML datasets can have thousands of instances and hundreds of features.</p><p>A <italic>label</italic> is the information on a particular feature for a certain instance in the dataset (eg, &#x201C;red&#x201D; for a feature of &#x201C;color,&#x201D; &#x201C;30 minutes&#x201D; for a feature of &#x201C;duration,&#x201D; and &#x201C;34 mg/dL&#x201D; for a feature of &#x201C;glucose level&#x201D;). A label generally refers to information that has been applied by a human or another algorithm based on manual or traditional analysis of the data and represents a classification, categorization, ranking, or answer to a question. For example, in a dataset of inpatients hospitalized for at least 30 days with a feature of &#x201C;patient developed a hospital-acquired infection during admission,&#x201D; the label would be &#x201C;positive&#x201D; if an infection was acquired and &#x201C;negative&#x201D; if none occurred, bearing in mind that the application of positive or negative would have to be done by a human who was analyzing the patient&#x2019;s data.</p><p>Of note is that data that are not manually labeled are simply referred to as data. 
One may also choose to use only a subset of the data in a dataset for training.</p></sec><sec id="s3"><title>Step 3: Differentiating Between Supervised and Unsupervised ML</title><p>A core initial concept is understanding the difference between 2 major categories of ML. In general, ML can be classified into supervised and unsupervised learning, although there are other categories or approaches such as reinforcement and transfer learning.</p><p>In <italic>supervised learning</italic>, data that have been labeled are fed into the ML algorithm for training. Once the algorithm has been trained, it is called a <italic>model</italic>. One of the greatest challenges in developing a high-quality model is being able to obtain a substantial amount of data (ie, usually thousands of instances) that have been accurately labeled. An example of supervised learning would be an algorithm trained on a dataset of chest X-rays and corresponding diagnoses [<xref ref-type="bibr" rid="ref4">4</xref>-<xref ref-type="bibr" rid="ref6">6</xref>]. In <italic>unsupervised learning</italic>, unlabeled data are fed into an algorithm, and the algorithm discovers relationships and groupings for itself. This difference is illustrated in <xref ref-type="fig" rid="figure2">Figure 2</xref>. An example of unsupervised learning is work in which an algorithm identified distinct clusters or groups of patients who had COVID-19 [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref8">8</xref>].</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Illustration of (A) supervised and (B) unsupervised learning.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="i-jmr_v15i1e85266_fig02.png"/></fig><p>In <xref ref-type="fig" rid="figure2">Figure 2A</xref>, the shapes are labeled as circles and squares by a human. 
The dataset is fed into the algorithm, and through these labels, the machine learns which features contribute most to classifying the shapes. In <xref ref-type="fig" rid="figure2">Figure 2B</xref>, the machine is fed raw data without any labels. Through exploration and differences detected in the data among its features, the model learns that there are potentially 2 different categories of objects. Of note is that the model may not inherently recognize them as a circle or a square but rather as 2 distinct categories of objects.</p><p>Other forms of learning also exist, such as <italic>reinforcement learning</italic>. In reinforcement learning, an algorithm experiences an environment and takes an action. On the basis of this action, the model is given a reward or a punishment as feedback. The algorithm learns which patterns result in rewards vs punishments and adjusts its behavior accordingly. Furthermore, supervised and unsupervised learning exist on a continuum, and some forms of learning are a mixture of both (ie, some instances are labeled, while others are not). A full description of these concepts is beyond the scope of this paper.</p><p>Transfer learning is another method in which an algorithm is first trained on a very large but nonspecific set of data for the desired outcome and then uses the learned patterns on a smaller but more specific set of data to refine the algorithm into a model. Transfer learning is often used where high-quality labeled datasets specific to the subject area are limited, whereas broader, nonspecific datasets are more plentiful.</p></sec><sec id="s4"><title>Step 4: Defining DL and Neural Networks</title><p>A subcategory of ML is DL. This branch of ML is heavily focused on neural networks. <italic>Neural networks</italic> were first described in the mid-20th century [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref10">10</xref>] and were designed to emulate neural processes in the human nervous system. 
While foundational work has been ongoing for decades in the field [<xref ref-type="bibr" rid="ref11">11</xref>-<xref ref-type="bibr" rid="ref13">13</xref>], much early work was constrained by hardware and data availability. The emergence of powerful parallel computing hardware, called graphics processing units; platforms to leverage graphics processing units; and the availability of large datasets have helped overcome these barriers [<xref ref-type="bibr" rid="ref14">14</xref>].</p><p>Artificial nodes, called <italic>neurons</italic>, are connected in layers to form a network. Data are put into the input layer of the network; the network processes the data through one to many hidden layers and then provides results in the output layer.</p><p>DL specifically refers to ML done on neural networks with many layers (thus the term &#x201C;deep&#x201D;). There is no exact number of layers that is generally agreed upon as a threshold. However, common examples such as ResNet (Microsoft) and the architecture underlying ChatGPT (OpenAI) can contain a hundred layers and billions of parameters [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref16">16</xref>].</p></sec><sec id="s5"><title>Step 5a: Walk-Through on How ML Algorithms Work</title><p>Next, this tutorial will lay out the process by which ML algorithms work, describe the process with an example, and then define other key terms. To learn how to properly predict, classify, or rank instances in supervised learning, an algorithm analyzes the data to determine which features contribute the most to the data labels. This learning process is called <italic>training</italic>. During training, the machine learns to adjust internal parameters, called <italic>weights</italic>, to produce the desired outputs. These weights correspond to the connections between individual neurons. 
This occurs through minimizing loss functions, in which the gap between the predicted and observed values is minimized.</p><p>For example, consider a model designed to estimate the risk of candidiasis in hospitalized patients. The model may learn that features such as intensive care admission, fever, abdominal symptoms, or a normal white blood cell count are associated with either a higher or lower risk of candidiasis. Features will often be mapped in a complex fashion across a series of neurons and layers. Modification of the weights applied to each of these features influences the resulting model&#x2019;s output through complex interactions across many layers of analysis, allowing the model to learn patterns that relate input features to the target outcome. In simpler forms of ML, it is sometimes possible to determine which features most greatly contribute to an outcome; however, in DL, this is often difficult because of the number of nodes and layers involved. Therefore, in DL, the change in importance may not be directly interpretable.</p><p>Datasets are often large and contain many features, some of which logically have nothing to do with the outcome being examined. If the data used to train an algorithm contain these completely unrelated variables, the algorithm may make nonsensical associations, which can later result in spuriously wrong results. For example, it may associate the color of a hospital gown and/or patient&#x2019;s sandwich preference with overall length of stay. However, if there are different colors of hospital gowns or menu choices for patients in intensive care units vs regular floors that are not captured in the data, then the model may miss the confounding variable completely. 
These errors in model development are hard to detect in all ML and even harder in DL.</p></sec><sec id="s6"><title>Step 5b: Walk-Through of Basic ML Mathematics Using an Example&#x2014;Loss Function</title><p>To illustrate the mathematics of ML training, we can use an extremely simplified example with a continuous variable in supervised learning. It is noted that most modern DL requires massive sets of data. While specifics may change in other forms of ML, this example will illustrate general concepts. In this example, the dataset includes the trough levels of a nephrotoxic drug and the resultant measured (true) estimated glomerular filtration rate (eGFR) in patients. The measured eGFR is the labeled data in this supervised ML. An appropriate ML algorithm is selected that attempts to predict the eGFR based on drug levels (<xref ref-type="table" rid="table1">Table 1</xref>).</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Sample data for a model predicting nephrotoxin toxicity.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Nephrotoxin level (mg/L)</td><td align="left" valign="bottom">True eGFR<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup> (y; mL/min/1.73 m<sup>2</sup>)</td><td align="left" valign="bottom">Machine learning model&#x2019;s predicted eGFR (&#x0177;; mL/min/1.73 m<sup>2</sup>)</td></tr></thead><tbody><tr><td align="char" char="." valign="top">10</td><td align="char" char="." valign="top">100</td><td align="char" char="." valign="top">95</td></tr><tr><td align="char" char="." valign="top">13</td><td align="char" char="." valign="top">95</td><td align="char" char="." valign="top">92</td></tr><tr><td align="char" char="." valign="top">15</td><td align="char" char="." valign="top">100</td><td align="char" char="." valign="top">90</td></tr><tr><td align="char" char="." valign="top">20</td><td align="char" char="." valign="top">90</td><td align="char" char="." 
valign="top">85</td></tr><tr><td align="char" char="." valign="top">25</td><td align="char" char="." valign="top">80</td><td align="char" char="." valign="top">80</td></tr><tr><td align="char" char="." valign="top">30</td><td align="char" char="." valign="top">65</td><td align="char" char="." valign="top">75</td></tr><tr><td align="char" char="." valign="top">40</td><td align="char" char="." valign="top">55</td><td align="char" char="." valign="top">65</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>eGFR: estimated glomerular filtration rate.</p></fn></table-wrap-foot></table-wrap><p>The model produces a prediction (&#x0177;) based on the nephrotoxin level (x). The algorithm compares the predicted eGFR (&#x0177;) to the real measured value of eGFR (y). Graphically, the predicted values of eGFR (the orange line in <xref ref-type="fig" rid="figure3">Figure 3</xref>) are plotted against the true values (the blue line). To determine the pattern between the nephrotoxin level (x) and the measured value of eGFR (y), the algorithm calculates the loss function. Differences might appear small in most regions but noticeable in some. To capture a model&#x2019;s performance across all data points, we use this loss function, a mathematical function that quantifies the total error between predicted and observed values.</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Sample data for a model predicting nephrotoxin level. eGFR: estimated glomerular filtration rate.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="i-jmr_v15i1e85266_fig03.png"/></fig><p>By convention, updates are made to the loss function after each batch in training. The model then adjusts the weights and tries again to determine, across all the instances in the dataset, which set of weights will produce the lowest loss function on average for the training data. 
With high-quality data that represent the full spectrum of possible cases that the model could be given, this helps ensure that the model is trained to perform well on average and does not result in spuriously wrong predictions. The specifics of how the algorithm calculates loss functions vary between algorithm types, with different functions being optimized for different tasks. For instance, the mean squared error, a common loss function, squares the difference between &#x0177; and y. By squaring the difference, it ensures negative loss values do not cancel positive values when summed, and it helps penalize severely wrong predictions.</p><p>In a model that performs well when it is fed new data (ie, generalizes well), &#x0177; and y will be similar for any new patient&#x2019;s nephrotoxin level fed into it. Models that do not generalize well may make large, nonsensical errors in prediction for a small subset of patients with a few specific differences in data points. This is why training a model on high-quality data that accurately represent the types of data it may encounter after deployment is critical.</p></sec><sec id="s7"><title>Step 6: Understanding the Concepts of Backpropagation and Gradient Descent</title><p>Another set of key concepts to understand is backpropagation and gradient descent. In DL, a model modifies the weights of specific neurons within a neural network to create its predictions. When data are input, they forward pass through the network. The initial run creates random weights, and the prediction ability of the initial model is likely poor. However, after this forward pass, the model evaluates its resultant loss function and attempts to minimize the loss through a process called gradient descent. In gradient descent, we assign a learning rate, which increments our point along the curve of the loss function (<xref ref-type="fig" rid="figure4">Figure 4</xref>). 
If the model discovers that the loss is increasing, it will move backward to decrease the loss. Conversely, if it discovers that the loss is decreasing, it will continue moving in that direction. In doing so, it eventually seeks out the local minima, thus minimizing the loss function and improving the model&#x2019;s predictive capacity.</p><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Graphical illustrations of concepts.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="i-jmr_v15i1e85266_fig04.png"/></fig><p>Mathematically, gradient descent can be expressed as<inline-formula><mml:math id="ieqn1"><mml:mstyle><mml:mrow><mml:mstyle displaystyle="false"><mml:mi>w</mml:mi><mml:mo>=</mml:mo><mml:mi>w</mml:mi><mml:mo>&#x2212;</mml:mo><mml:mi>&#x03B7;</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mfrac><mml:mrow><mml:mi>d</mml:mi><mml:mi>L</mml:mi></mml:mrow><mml:mrow><mml:mi>d</mml:mi><mml:mi>w</mml:mi></mml:mrow></mml:mfrac><mml:mo>)</mml:mo></mml:mrow></mml:mstyle></mml:mrow></mml:mstyle></mml:math></inline-formula>. Remember that the derivative <inline-formula><mml:math id="ieqn2"><mml:mstyle><mml:mrow><mml:mstyle displaystyle="false"><mml:mo stretchy="false">(</mml:mo><mml:mfrac><mml:mrow><mml:mi>d</mml:mi><mml:mi>L</mml:mi></mml:mrow><mml:mrow><mml:mi>d</mml:mi><mml:mi>w</mml:mi></mml:mrow></mml:mfrac><mml:mo stretchy="false">)</mml:mo></mml:mstyle></mml:mrow></mml:mstyle></mml:math></inline-formula> of the loss function gives the gradient at any given point. This allows the model to know if the loss is increasing or decreasing. The learning rate (&#x03B7;) tells the model how much to move in each direction. Recall that the loss function quantifies the difference between the predicted and observed values, with the goal of identifying the lowest point and, thus, minimizing this difference. These concepts are illustrated again visually in <xref ref-type="fig" rid="figure4">Figure 4</xref>. 
The mean squared error of our example model&#x2019;s simple line (<xref ref-type="fig" rid="figure3">Figure 3</xref>) is a parabola, creating an easy visualization of gradient descent for illustration purposes. When &#x03B7; is set at too large a value, it is possible that the model will jump very far back, such that it misses the local minimum. Conversely, too small a value of &#x03B7; may result in such minimal movement that the minimum is never reached.</p><p>During the training process, to compute the gradients used to update the weights, the model will undergo a process called backpropagation. The trainer algorithm will go through the neural network and alter the weights of neurons with the intention of decreasing loss. This process ultimately results in better prediction ability of the ML model.</p></sec><sec id="s8"><title>Module 1: A Brief Explanation of AI Explainability and Safety</title><p>As AI becomes significantly more powerful and integrated into society, various risks and errors have been observed. A brief discussion of safety is outlined here, and there are many other publications available that dive into each of these in detail.</p><p>How an ML model determines its output (ie, resultant prediction or result given to a user) from input is often unclear. This refers to the &#x201C;black-box&#x201D; nature of all ML but particularly of DL and neural networks. A field called explainable AI has emerged and attempts to not only better understand how models make their predictions but also add components to the ML model that require the model to explain how it arrived at its result (ie, which features had the most impact) [<xref ref-type="bibr" rid="ref17">17</xref>].</p><p>For all AI, especially in medicine, it is imperative that model performance be checked for variations in outcomes that may indicate that human bias in the data has been promulgated or exacerbated by the model. AI algorithms are exquisitely sensitive pattern detection tools. 
As such, they can detect slight variations in data that resulted from human prejudice and bias. Worse, they can promulgate such bias into the model. Therefore, it is imperative that models be checked for differences in results that can be attributed to race, gender, socioeconomic status, religion, etc, as the presence of these elements in a model will cause the model to produce inaccurate results in certain groups of patients. Concrete examples include models having difficulty diagnosing dermatological conditions in those with darker skin tones [<xref ref-type="bibr" rid="ref18">18</xref>], and a software program that accidentally referred White patients over African American patients to receive special care [<xref ref-type="bibr" rid="ref19">19</xref>]. In the latter example, while the goal was to ensure patients received the care required, the algorithm was designed to predict whose care would cost more money, and it was found that less money was spent on African American patients despite having the same level of need [<xref ref-type="bibr" rid="ref19">19</xref>]. Furthermore, a study found that an AI system could learn to predict ethnicity from radiographs alone [<xref ref-type="bibr" rid="ref6">6</xref>]. There are now tools that assist in the detection of these elements, and models that include these elements need to be retrained on optimized data or otherwise mitigated to ensure the best care for all patients. Finally, risks associated with the infrastructure of AI also exist. As many models use a cloud-based model for computation and some public commercial large language models (LLMs) use input data to retrain models, clinicians need to be aware of where sensitive data are being sent and stored.</p><p>Now that the use of AI is rapidly becoming more pervasive, it is reasonable to think that the degree to which AI is set to perform autonomously (ie, without a human in the middle) will increase. 
Some publications have discussed safety measures that are required to mitigate risks as AI becomes more autonomous and integrated into systems. These risks range from tangible risks today to theoretical risks with more powerful models. Experts have classified these risks into 4 categories: misuse, misalignment, mistakes, and structural risks. Misuse occurs when users intentionally instruct an AI tool to behave in harmful ways (ie, the user is an adversary). Misalignment occurs when AI systems knowingly act against human intent (ie, the AI is an adversary). This includes intentional deception by AI. Mistakes occur when AI systems produce incorrect outputs without intentional wrongdoing, often because real-world data are complex and influenced by many contributing factors. Finally, structural risks may emerge whereby pervasive AI systems integrated into society cause harm through the actions of multiple independent agents in a multifactorial, multiagent fashion [<xref ref-type="bibr" rid="ref20">20</xref>].</p><p>A substantial body of ongoing research in AI focuses on ensuring and improving AI safety. For instance, studies focus on the effects of adversarial attacks on models to understand safety. Developers also attempt to build safeguards into models and use red teaming, where security professionals attempt to simulate attacks to determine robustness [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref18">18</xref>]. Equally important are efforts to create nuanced and well-informed regulations and guidelines for development [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref21">21</xref>-<xref ref-type="bibr" rid="ref26">26</xref>].</p></sec><sec id="s9"><title>Module 2: Contemporary Clinical Applications of AI in Medicine</title><p>Health care is one of the most promising industries for AI. 
While disruptive-level work is underway, such as the ability to understand protein folding [<xref ref-type="bibr" rid="ref27">27</xref>], AI has numerous applications currently being used routinely in health care.</p><p>Documentation is recognized to often be excessive and contributory to physician burnout, with an American Medical Informatics Association survey finding that 73.26% of health care professionals believed the time spent was inappropriate, 77.42% reported after-hours work related to documentation, and 74.83% believed that documentation impedes patient care [<xref ref-type="bibr" rid="ref28">28</xref>]. Canadian data show similar findings, with physicians spending excessive amounts of time on administrative tasks that result in burnout [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref30">30</xref>].</p><p>AI scribes are tools that can ambiently listen to patient interactions and automatically generate notes for physicians, reducing the administrative burden for physicians. Numerous companies have created offerings; however, in general, scribes use LLMs, models based on the transformer architecture (which uses an attention mechanism to process preceding information and learn relationships among them) [<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>]. Within the context of scribes, the LLM uses this mechanism of self-attention to help generate logical text. Thus, the same limitations of transformers and LLMs carry forward onto AI scribes. For example, in many LLMs, hallucinations are a concern, which are theorized to arise because of algorithms being rewarded for correct responses, thereby making guessing a more advantageous response than acknowledging uncertainty. 
Health care providers must be aware of these limitations of AI scribes and how they may potentially arise.</p><p>LLMs have also been used by vendors such as Epic to automatically extract information out of existing notes, such as creating discharge summaries, reading radiology reports, providing summaries, preparing tasks based on the note being created, and providing insights [<xref ref-type="bibr" rid="ref33">33</xref>]. They have also been used as chatbots to act in clerical roles and as search engines for medical knowledge [<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref35">35</xref>]. While LLMs such as ChatGPT (OpenAI), Claude (Anthropic), and Gemini (Google) are commonly used by the general public, OpenEvidence (OpenEvidence LLC) is trained specifically on medical literature, reducing errors and hallucinations [<xref ref-type="bibr" rid="ref35">35</xref>]. Models intended for use in research and science also exist, such as Perplexity (Perplexity AI) and Elicit (Elicit Research) [<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref37">37</xref>]. Open source, medically focused models without interfaces also exist, such as MedGemma (Google) [<xref ref-type="bibr" rid="ref38">38</xref>].</p><p>Under the broader definition discussed in this paper, AI has been used in diagnostics for decades using simple rule-based algorithms. However, more recently, ML-DL&#x2013;based approaches have begun to gain traction due to improved performance. Areas that have shown promise for ML-DL performance are radiology and pathology [<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]. In radiology, DL software such as CINA-iPE (Avicenna.AI) has shown promise in detecting pulmonary embolisms [<xref ref-type="bibr" rid="ref41">41</xref>]. In pathology, ML has improved rapid patient diagnostics based on DNA methylation markers [<xref ref-type="bibr" rid="ref42">42</xref>]. 
Sepsis and cardiac arrest prediction have been ongoing areas of work [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref44">44</xref>]. While results from this work are promising, due in part to concerns about interpretability and accuracy, as well as the potential for uncovering clinically irrelevant abnormalities (&#x201C;incidentalomas&#x201D;), it is unclear how clinicians should react to findings [<xref ref-type="bibr" rid="ref45">45</xref>]. The volume of AI tool uptake is increasing, and AI will inevitably impact the workflow of clinicians in the near future. AI could also support hospital infrastructure [<xref ref-type="bibr" rid="ref46">46</xref>].</p><p>Despite its potential benefit, AI integration into health care is still an evolving landscape. Regulation and guidance remain an important area of evolving work for health care AI, with numerous national and international bodies producing frameworks and guidelines for AI use [<xref ref-type="bibr" rid="ref47">47</xref>-<xref ref-type="bibr" rid="ref49">49</xref>]. Important questions that remain under debate include the responsibility for AI errors, with many institutions holding physicians ultimately accountable for clinical decisions, how to mitigate bias in health care AI propagated by inherent bias in training data, and the importance of privacy [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref50">50</xref>]. 
Furthermore, many studies have indicated that implementation into workflows can also be challenging, due to a lack of awareness and engagement by both patients and health care professionals, as well as logistical implementation challenges inherent to any health technology [<xref ref-type="bibr" rid="ref51">51</xref>].</p></sec><sec id="s10"><title>Future Directions</title><p>AI promises to be one of the most critical revolutions of human society, arguably on par with the industrial or even agricultural revolutions. AI will impact every area of human society, including health care.</p><p>While current implementations such as LLMs, predictive models such as convolutional neural networks, and the tools they have created (eg, AI scribes and ChatGPT) are influential, most of the society-changing work is being dedicated to creating artificial general intelligence and artificial superintelligence. While the exact definition of these terms, and even their possibility, is a matter of contention [<xref ref-type="bibr" rid="ref52">52</xref>], they generally refer to AI systems that are either as intelligent as or more intelligent than human beings across a wide array of domains and tasks.</p><p>In achieving this goal, AI will have implications on the role of humans in a post&#x2013;artificial general intelligence society. While there are critical concerns about human displacement, there is also a potential for creating abundance, reducing scarcity, and an ability to supercharge scientific discovery [<xref ref-type="bibr" rid="ref53">53</xref>].</p><p>Due to its importance, the authors believe it is important that clinicians receive structured educational content on the topic. Leaders could consider integrating formal foundational AI teachings into medical school curricula and, then in later years, providing a chance to discuss its implications and applications in health care. 
These sessions could also be incorporated into postgraduate education and into continuing medical education sessions offered by workplaces.</p></sec></body><back><ack><p>The authors declare the use of generative artificial intelligence (GAI) in the research and writing process. According to the Generative Artificial Intelligence Delegation Taxonomy (2025), the following tasks were delegated to GAI tools under full human supervision: proofreading; editing; adapting and adjusting emotional tone; and reformatting sentence structure in specific, limited portions of text. On occasion, ChatGPT was used as a search engine to find links to relevant references, with authors further reviewing and ensuring the accuracy of these references. The GAI tool used was ChatGPT (version 5.1; OpenAI). Responsibility for the final manuscript lies entirely with the authors. GAI tools are not listed as authors and do not bear responsibility for the final outcomes.</p></ack><notes><sec><title>Funding</title><p>No external financial support or grants were received from any public, commercial, not-for-profit, or other entity for any part of this work.</p></sec></notes><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">DL</term><def><p>deep learning</p></def></def-item><def-item><term id="abb3">eGFR</term><def><p>estimated glomerular filtration rate</p></def></def-item><def-item><term id="abb4">LLM</term><def><p>large language model</p></def></def-item><def-item><term id="abb5">ML</term><def><p>machine learning</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="web"><article-title>What is artificial intelligence (AI)?</article-title><source>IBM</source><access-date>2025-06-28</access-date><comment><ext-link ext-link-type="uri" 
xlink:href="https://www.ibm.com/think/topics/artificial-intelligence">https://www.ibm.com/think/topics/artificial-intelligence</ext-link></comment></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Shortliffe</surname><given-names>EH</given-names> </name></person-group><article-title>A rule-based computer program for advising physicians regarding antimicrobial therapy selection</article-title><conf-name>ACM &#x2019;74: Proceedings of the 1974 Annual ACM Conference</conf-name><conf-date>Jan 1, 1974</conf-date><pub-id pub-id-type="doi">10.1145/1408800.1408906</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Campbell</surname><given-names>M</given-names> </name></person-group><article-title>Knowledge discovery in deep blue</article-title><source>Commun ACM</source><year>1999</year><month>11</month><volume>42</volume><issue>11</issue><fpage>65</fpage><lpage>67</lpage><pub-id pub-id-type="doi">10.1145/319382.319396</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lee</surname><given-names>SB</given-names> </name></person-group><article-title>Development of a chest X-ray machine learning convolutional neural network model on a budget and using artificial intelligence explainability techniques to analyze patterns of machine learning inference</article-title><source>JAMIA Open</source><year>2024</year><month>07</month><volume>7</volume><issue>2</issue><fpage>ooae035</fpage><pub-id pub-id-type="doi">10.1093/jamiaopen/ooae035</pub-id><pub-id pub-id-type="medline">38699648</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Lee</surname><given-names>SB</given-names> </name></person-group><article-title>Gradual poisoning of a chest x-ray convolutional neural network with an adversarial attack and AI explainability methods</article-title><source>Sci Rep</source><year>2025</year><month>07</month><day>1</day><volume>15</volume><issue>1</issue><fpage>21779</fpage><pub-id pub-id-type="doi">10.1038/s41598-025-02294-3</pub-id><pub-id pub-id-type="medline">40593872</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gichoya</surname><given-names>JW</given-names> </name><name name-style="western"><surname>Banerjee</surname><given-names>I</given-names> </name><name name-style="western"><surname>Bhimireddy</surname><given-names>AR</given-names> </name><etal/></person-group><article-title>AI recognition of patient race in medical imaging: a modelling study</article-title><source>Lancet Digit Health</source><year>2022</year><month>06</month><volume>4</volume><issue>6</issue><fpage>e406</fpage><lpage>e414</lpage><pub-id pub-id-type="doi">10.1016/S2589-7500(22)00063-2</pub-id><pub-id pub-id-type="medline">35568690</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Benito-Le&#x00F3;n</surname><given-names>J</given-names> </name><name name-style="western"><surname>Del Castillo</surname><given-names>MD</given-names> </name><name name-style="western"><surname>Estirado</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ghosh</surname><given-names>R</given-names> </name><name name-style="western"><surname>Dubey</surname><given-names>S</given-names> </name><name name-style="western"><surname>Serrano</surname><given-names>JI</given-names> 
</name></person-group><article-title>Using unsupervised machine learning to identify age- and sex-independent severity subgroups among patients with COVID-19: observational longitudinal study</article-title><source>J Med Internet Res</source><year>2021</year><month>05</month><day>27</day><volume>23</volume><issue>5</issue><fpage>e25988</fpage><pub-id pub-id-type="doi">10.2196/25988</pub-id><pub-id pub-id-type="medline">33872186</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nalinthasnai</surname><given-names>N</given-names> </name><name name-style="western"><surname>Thammasudjarit</surname><given-names>R</given-names> </name><name name-style="western"><surname>Tassaneyasin</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Unsupervised machine learning clustering approach for hospitalized COVID-19 pneumonia patients</article-title><source>BMC Pulm Med</source><year>2025</year><month>02</month><day>8</day><volume>25</volume><issue>1</issue><fpage>70</fpage><pub-id pub-id-type="doi">10.1186/s12890-025-03536-w</pub-id><pub-id pub-id-type="medline">39923003</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rosenblatt</surname><given-names>F</given-names> </name></person-group><article-title>The perceptron: a probabilistic model for information storage and organization in the brain</article-title><source>Psychol Rev</source><year>1958</year><month>11</month><volume>65</volume><issue>6</issue><fpage>386</fpage><lpage>408</lpage><pub-id pub-id-type="doi">10.1037/h0042519</pub-id><pub-id pub-id-type="medline">13602029</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>McCulloch</surname><given-names>WS</given-names> </name><name name-style="western"><surname>Pitts</surname><given-names>W</given-names> </name></person-group><article-title>A logical calculus of the ideas immanent in nervous activity</article-title><source>Bull Math Biophys</source><year>1943</year><month>12</month><volume>5</volume><issue>4</issue><fpage>115</fpage><lpage>133</lpage><pub-id pub-id-type="doi">10.1007/BF02478259</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rumelhart</surname><given-names>DE</given-names> </name><name name-style="western"><surname>Hinton</surname><given-names>GE</given-names> </name><name name-style="western"><surname>Williams</surname><given-names>RJ</given-names> </name></person-group><article-title>Learning representations by back-propagating errors</article-title><source>Nature</source><year>1986</year><month>10</month><volume>323</volume><issue>6088</issue><fpage>533</fpage><lpage>536</lpage><pub-id pub-id-type="doi">10.1038/323533a0</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lippmann</surname><given-names>R</given-names> </name></person-group><article-title>An introduction to computing with neural nets</article-title><source>IEEE ASSP Mag</source><year>1987</year><volume>4</volume><issue>2</issue><fpage>4</fpage><lpage>22</lpage><pub-id pub-id-type="doi">10.1109/MASSP.1987.1165576</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ackley</surname><given-names>DH</given-names> </name><name name-style="western"><surname>Hinton</surname><given-names>GE</given-names> </name><name 
name-style="western"><surname>Sejnowski</surname><given-names>TJ</given-names> </name></person-group><article-title>A learning algorithm for Boltzmann machines</article-title><source>Cogn Sci</source><year>1985</year><month>03</month><volume>9</volume><issue>1</issue><fpage>147</fpage><lpage>169</lpage><pub-id pub-id-type="doi">10.1016/S0364-0213(85)80012-4</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>M</given-names> </name><name name-style="western"><surname>Bi</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Deep learning and machine learning with GPGPU and CUDA: unlocking the power of parallel computing</article-title><source>arXiv</source><comment>Preprint posted online on  Oct 8, 2024</comment><pub-id pub-id-type="doi">10.48550/arXiv.2410.05686</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>BD</given-names> </name><name name-style="western"><surname>Meng</surname><given-names>J</given-names> </name><name name-style="western"><surname>Xie</surname><given-names>WY</given-names> </name><name name-style="western"><surname>Shao</surname><given-names>S</given-names> </name><name name-style="western"><surname>Li</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>Y</given-names> </name></person-group><article-title>Weighted spatial pyramid matching collaborative representation for remote-sensing-image scene classification</article-title><source>Remote Sens (Basel)</source><year>2019</year><volume>11</volume><issue>5</issue><fpage>518</fpage><pub-id 
pub-id-type="doi">10.3390/rs11050518</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Alarcon</surname><given-names>N</given-names> </name></person-group><article-title>OpenAI presents GPT-3, a 175 billion parameters language model</article-title><source>NVIDIA Developer</source><year>2020</year><month>07</month><day>7</day><access-date>2026-01-17</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://developer.nvidia.com/blog/openai-presents-gpt-3-a-175-billion-parameters-language-model/">https://developer.nvidia.com/blog/openai-presents-gpt-3-a-175-billion-parameters-language-model/</ext-link></comment></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Lundberg</surname><given-names>SM</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>SI</given-names> </name></person-group><article-title>A unified approach to interpreting model predictions</article-title><conf-name>Proceedings of the 31st International Conference on Neural Information Processing Systems</conf-name><conf-date>Dec 4-9, 2017</conf-date><pub-id pub-id-type="doi">10.5555/3295222.3295230</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dowie</surname><given-names>T</given-names> </name></person-group><article-title>Exploring the diagnostic capability of artificial intelligence in dermatology for darker skin tones: a narrative review</article-title><source>Cureus</source><year>2025</year><month>10</month><volume>17</volume><issue>10</issue><fpage>e94909</fpage><pub-id pub-id-type="doi">10.7759/cureus.94909</pub-id><pub-id pub-id-type="medline">41262830</pub-id></nlm-citation></ref><ref 
id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Obermeyer</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Powers</surname><given-names>B</given-names> </name><name name-style="western"><surname>Vogeli</surname><given-names>C</given-names> </name><name name-style="western"><surname>Mullainathan</surname><given-names>S</given-names> </name></person-group><article-title>Dissecting racial bias in an algorithm used to manage the health of populations</article-title><source>Science</source><year>2019</year><month>10</month><day>25</day><volume>366</volume><issue>6464</issue><fpage>447</fpage><lpage>453</lpage><pub-id pub-id-type="doi">10.1126/science.aax2342</pub-id><pub-id pub-id-type="medline">31649194</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="web"><article-title>An approach to technical AGI safety and security</article-title><source>Medium</source><year>2025</year><access-date>2025-01-17</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://deepmindsafetyresearch.medium.com/an-approach-to-technical-agi-safety-and-security-25928819fbc6">https://deepmindsafetyresearch.medium.com/an-approach-to-technical-agi-safety-and-security-25928819fbc6</ext-link></comment></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Norgeot</surname><given-names>B</given-names> </name><name name-style="western"><surname>Quer</surname><given-names>G</given-names> </name><name name-style="western"><surname>Beaulieu-Jones</surname><given-names>BK</given-names> </name><etal/></person-group><article-title>Minimum information about clinical artificial intelligence modeling: the MI-CLAIM checklist</article-title><source>Nat 
Med</source><year>2020</year><month>09</month><volume>26</volume><issue>9</issue><fpage>1320</fpage><lpage>1324</lpage><pub-id pub-id-type="doi">10.1038/s41591-020-1041-y</pub-id><pub-id pub-id-type="medline">32908275</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="web"><article-title>Predetermined change control plans for machine learning-enabled medical devices: guiding principles</article-title><source>U.S. Food &#x0026; Drug Administration</source><year>2025</year><access-date>2026-02-25</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.fda.gov/medical-devices/software-medical-device-samd/predetermined-change-control-plans-machine-learning-enabled-medical-devices-guiding-principles">https://www.fda.gov/medical-devices/software-medical-device-samd/predetermined-change-control-plans-machine-learning-enabled-medical-devices-guiding-principles</ext-link></comment></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="web"><article-title>Transparency for machine learning-enabled medical devices: guiding principles</article-title><source>U.S. Food &#x0026; Drug Administration</source><year>2024</year><month>06</month><day>13</day><access-date>2026-02-25</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.fda.gov/medical-devices/software-medical-device-samd/transparency-machine-learning-enabled-medical-devices-guiding-principles">https://www.fda.gov/medical-devices/software-medical-device-samd/transparency-machine-learning-enabled-medical-devices-guiding-principles</ext-link></comment></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="web"><article-title>Good machine learning practice for medical device development: guiding principles</article-title><source>U.S. 
Food &#x0026; Drug Administration</source><year>2025</year><access-date>2026-02-25</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.fda.gov/medical-devices/software-medical-device-samd/good-machine-learning-practice-medical-device-development-guiding-principles">https://www.fda.gov/medical-devices/software-medical-device-samd/good-machine-learning-practice-medical-device-development-guiding-principles</ext-link></comment></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="web"><article-title>Engineering fundamentals checklist</article-title><source>Microsoft Open Source</source><access-date>2026-01-17</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://microsoft.github.io/code-with-engineering-playbook/engineering-fundamentals-checklist/">https://microsoft.github.io/code-with-engineering-playbook/engineering-fundamentals-checklist/</ext-link></comment></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Koh</surname><given-names>RGL</given-names> </name><name name-style="western"><surname>Khan</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Rashidiani</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Check it before you wreck it: a guide to STAR-ML for screening machine learning reporting in research</article-title><source>IEEE Access</source><year>2023</year><volume>11</volume><fpage>101567</fpage><lpage>101579</lpage><pub-id pub-id-type="doi">10.1109/ACCESS.2023.3316019</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jumper</surname><given-names>J</given-names> </name><name name-style="western"><surname>Evans</surname><given-names>R</given-names> </name><name 
name-style="western"><surname>Pritzel</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Highly accurate protein structure prediction with AlphaFold</article-title><source>Nature</source><year>2021</year><month>08</month><volume>596</volume><issue>7873</issue><fpage>583</fpage><lpage>589</lpage><pub-id pub-id-type="doi">10.1038/s41586-021-03819-2</pub-id><pub-id pub-id-type="medline">34265844</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sloss</surname><given-names>EA</given-names> </name><name name-style="western"><surname>Owoyemi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Mishra</surname><given-names>AK</given-names> </name><etal/></person-group><article-title>Development of the TrendBurden survey: assessing perceived documentation burden among health professionals in the United States</article-title><source>Appl Clin Inform</source><year>2025</year><month>05</month><volume>16</volume><issue>3</issue><fpage>662</fpage><lpage>675</lpage><pub-id pub-id-type="doi">10.1055/a-2562-0910</pub-id><pub-id pub-id-type="medline">40669861</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="report"><article-title>Physician administrative burden survey &#x2013;final report</article-title><year>2020</year><month>09</month><access-date>2026-02-25</access-date><publisher-name>Doctors Nova Scotia</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://doctorsns.com/sites/default/files/2020-11/admin-burden-survey-results.pdf">https://doctorsns.com/sites/default/files/2020-11/admin-burden-survey-results.pdf</ext-link></comment></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="web"><article-title>Joint Task Force to reduce administrative burdens on physicians</article-title><source>Doctors 
Manitoba</source><year>2023</year><month>05</month><day>30</day><access-date>2026-02-25</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://assets.doctorsmanitoba.ca/documents/Admin-Burden-Progress-Report-May-30.pdf">https://assets.doctorsmanitoba.ca/documents/Admin-Burden-Progress-Report-May-30.pdf</ext-link></comment></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Acallar</surname><given-names>LJ</given-names> </name></person-group><article-title>AI medical scribes: everything you need to know</article-title><source>Heidi</source><year>2026</year><access-date>2026-02-25</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.heidihealth.com/blog/ai-medical-scribe?utm_source=chatgpt.com">https://www.heidihealth.com/blog/ai-medical-scribe?utm_source=chatgpt.com</ext-link></comment></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Vaswani</surname><given-names>A</given-names> </name><name name-style="western"><surname>Shazeer</surname><given-names>N</given-names> </name><name name-style="western"><surname>Parmar</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Attention is all you need</article-title><access-date>2026-02-25</access-date><conf-name>31st Conference on Neural Information Processing Systems (NIPS 2017)</conf-name><conf-date>Dec 4-7, 2017</conf-date><comment><ext-link ext-link-type="uri" xlink:href="https://proceedings.neurips.cc/paper_files/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf">https://proceedings.neurips.cc/paper_files/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf</ext-link></comment></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="web"><article-title>AI for 
clinicians</article-title><source>Epic</source><access-date>2026-02-25</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.epic.com/software/ai-clinicians/">https://www.epic.com/software/ai-clinicians/</ext-link></comment></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="web"><article-title>AI receptionist answers the call for busy medical clinics</article-title><source>Hamilton Health Sciences</source><access-date>2026-03-20</access-date><comment><ext-link ext-link-type="uri" xlink:href="http://www.hamiltonhealthsciences.ca/share/patients-test-ai-receptionist">www.hamiltonhealthsciences.ca/share/patients-test-ai-receptionist</ext-link></comment></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="web"><source>Open Evidence</source><access-date>2026-03-20</access-date><comment><ext-link ext-link-type="uri" xlink:href="http://www.openevidence.com">www.openevidence.com</ext-link></comment></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="web"><article-title>AI for scientific research</article-title><source>Elicit</source><access-date>2026-02-25</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://elicit.com">https://elicit.com</ext-link></comment></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="web"><source>Perplexity</source><access-date>2026-02-25</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.perplexity.ai">https://www.perplexity.ai</ext-link></comment></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="web"><article-title>MedGemma</article-title><source>Google DeepMind</source><access-date>2026-02-25</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://deepmind.google/models/gemma/medgemma">https://deepmind.google/models/gemma/medgemma</ext-link></comment></nlm-citation></ref><ref 
id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lawrence</surname><given-names>R</given-names> </name><name name-style="western"><surname>Dodsworth</surname><given-names>E</given-names> </name><name name-style="western"><surname>Massou</surname><given-names>E</given-names> </name><etal/></person-group><article-title>Artificial intelligence for diagnostics in radiology practice: a rapid systematic scoping review</article-title><source>EClinicalMedicine</source><year>2025</year><month>05</month><day>12</day><volume>83</volume><fpage>103228</fpage><pub-id pub-id-type="doi">10.1016/j.eclinm.2025.103228</pub-id><pub-id pub-id-type="medline">40474995</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cazzato</surname><given-names>G</given-names> </name><name name-style="western"><surname>Rongioletti</surname><given-names>F</given-names> </name></person-group><article-title>Artificial intelligence in dermatopathology: updates, strengths, and challenges</article-title><source>Clin Dermatol</source><year>2024</year><volume>42</volume><issue>5</issue><fpage>437</fpage><lpage>442</lpage><pub-id pub-id-type="doi">10.1016/j.clindermatol.2024.06.010</pub-id><pub-id pub-id-type="medline">38909860</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Farzaneh</surname><given-names>H</given-names> </name><name name-style="western"><surname>Junn</surname><given-names>J</given-names> </name><name name-style="western"><surname>Chaibi</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>Deep learning-based algorithm for automatic detection of incidental pulmonary embolism on contrast-enhanced CT: a multicenter 
multivendor study</article-title><source>Radiol Adv</source><year>2025</year><month>06</month><day>23</day><volume>2</volume><issue>4</issue><fpage>umaf021</fpage><pub-id pub-id-type="doi">10.1093/radadv/umaf021</pub-id><pub-id pub-id-type="medline">41058960</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Aref-Eshghi</surname><given-names>E</given-names> </name><name name-style="western"><surname>Abadi</surname><given-names>AB</given-names> </name><name name-style="western"><surname>Farhadieh</surname><given-names>ME</given-names> </name><etal/></person-group><article-title>DNA methylation and machine learning: challenges and perspective toward enhanced clinical diagnostics</article-title><source>Clin Epigenetics</source><year>2025</year><month>10</month><day>10</day><volume>17</volume><issue>1</issue><fpage>170</fpage><pub-id pub-id-type="doi">10.1186/s13148-025-01967-0</pub-id><pub-id pub-id-type="medline">41074112</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chang</surname><given-names>WS</given-names> </name><name name-style="western"><surname>Hsiao</surname><given-names>KY</given-names> </name><name name-style="western"><surname>Lin</surname><given-names>LY</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>M</given-names> </name><name name-style="western"><surname>Shia</surname><given-names>BC</given-names> </name><name name-style="western"><surname>Lin</surname><given-names>CY</given-names> </name></person-group><article-title>Machine learning models for predicting in-hospital cardiac arrest: a comparative analysis with logistic regression</article-title><source>Int J Gen Med</source><year>2025</year><volume>18</volume><fpage>6341</fpage><lpage>6352</lpage><pub-id 
pub-id-type="doi">10.2147/IJGM.S569559</pub-id><pub-id pub-id-type="medline">41141891</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Drysch</surname><given-names>M</given-names> </name><name name-style="western"><surname>Reinkemeier</surname><given-names>F</given-names> </name><name name-style="western"><surname>Puscz</surname><given-names>F</given-names> </name><etal/></person-group><article-title>Streamlined machine learning model for early sepsis risk prediction in burn patients</article-title><source>NPJ Digit Med</source><year>2025</year><month>10</month><day>21</day><volume>8</volume><issue>1</issue><fpage>621</fpage><pub-id pub-id-type="doi">10.1038/s41746-025-02078-z</pub-id><pub-id pub-id-type="medline">41120704</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Moss</surname><given-names>L</given-names> </name><name name-style="western"><surname>Corsar</surname><given-names>D</given-names> </name><name name-style="western"><surname>Shaw</surname><given-names>M</given-names> </name><name name-style="western"><surname>Piper</surname><given-names>I</given-names> </name><name name-style="western"><surname>Hawthorne</surname><given-names>C</given-names> </name></person-group><article-title>Demystifying the black box: the importance of interpretability of predictive models in neurocritical care</article-title><source>Neurocrit Care</source><year>2022</year><month>08</month><volume>37</volume><issue>Suppl 2</issue><fpage>185</fpage><lpage>191</lpage><pub-id pub-id-type="doi">10.1007/s12028-022-01504-4</pub-id><pub-id pub-id-type="medline">35523917</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Kumar</surname><given-names>AK</given-names> </name><name name-style="western"><surname>Ali</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Kumar</surname><given-names>RR</given-names> </name><name name-style="western"><surname>Assaf</surname><given-names>MH</given-names> </name><name name-style="western"><surname>Ilyas</surname><given-names>S</given-names> </name></person-group><article-title>Artificial intelligent and internet of things framework for sustainable hazardous waste management in hospitals</article-title><source>Waste Manag</source><year>2025</year><month>07</month><day>15</day><volume>203</volume><fpage>114816</fpage><pub-id pub-id-type="doi">10.1016/j.wasman.2025.114816</pub-id><pub-id pub-id-type="medline">40311410</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="web"><article-title>Ethics and governance of artificial intelligence for health: WHO guidance</article-title><source>World Health Organization</source><year>2021</year><access-date>2026-02-25</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.who.int/publications/i/item/9789240029200">https://www.who.int/publications/i/item/9789240029200</ext-link></comment></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="web"><article-title>The medico-legal lens on AI use by Canadian physicians</article-title><source>Canadian Medical Protective Association</source><year>2024</year><month>09</month><access-date>2026-02-25</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.cmpa-acpm.ca/en/research-policy/public-policy/the-medico-legal-lens-on-ai-use-by-canadian-physicians">https://www.cmpa-acpm.ca/en/research-policy/public-policy/the-medico-legal-lens-on-ai-use-by-canadian-physicians</ext-link></comment></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Solomonides</surname><given-names>AE</given-names> </name><name name-style="western"><surname>Koski</surname><given-names>E</given-names> </name><name name-style="western"><surname>Atabaki</surname><given-names>SM</given-names> </name><etal/></person-group><article-title>Defining AMIA&#x2019;s artificial intelligence principles</article-title><source>J Am Med Inform Assoc</source><year>2022</year><month>03</month><day>15</day><volume>29</volume><issue>4</issue><fpage>585</fpage><lpage>591</lpage><pub-id pub-id-type="doi">10.1093/jamia/ocac006</pub-id><pub-id pub-id-type="medline">35190824</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="web"><article-title>Artificial intelligence (AI) in medical practice</article-title><source>College of Physicians and Surgeons of Saskatchewan</source><access-date>2026-01-19</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.cps.sk.ca/imis/ContentBuddyDownload.aspx?DocumentVersionKey=879928d2-6caa-4894-a866-86fbea85ce57">https://www.cps.sk.ca/imis/ContentBuddyDownload.aspx?DocumentVersionKey=879928d2-6caa-4894-a866-86fbea85ce57</ext-link></comment></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Livieri</surname><given-names>G</given-names> </name><name name-style="western"><surname>Mangina</surname><given-names>E</given-names> </name><name name-style="western"><surname>Protopapadakis</surname><given-names>ED</given-names> </name><name name-style="western"><surname>Panayiotou</surname><given-names>AG</given-names> </name></person-group><article-title>The gaps and challenges in digital health technology use as perceived by patients: a scoping review and narrative meta-synthesis</article-title><source>Front Digit 
Health</source><year>2025</year><month>03</month><day>27</day><volume>7</volume><fpage>1474956</fpage><pub-id pub-id-type="doi">10.3389/fdgth.2025.1474956</pub-id><pub-id pub-id-type="medline">40212901</pub-id></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="other"><person-group person-group-type="author"><name name-style="western"><surname>Morris</surname><given-names>MR</given-names> </name><name name-style="western"><surname>Sohl-Dickstein</surname><given-names>J</given-names> </name><name name-style="western"><surname>Fiedel</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Levels of AGI for operationalizing progress on the path to AGI</article-title><publisher-name>arXiv</publisher-name><comment>Preprint posted online on Nov 4, 2023</comment><pub-id pub-id-type="doi">10.48550/arXiv.2311.02462</pub-id></nlm-citation></ref><ref id="ref53"><label>53</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Xu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Cao</surname><given-names>X</given-names> </name><etal/></person-group><article-title>Artificial intelligence: a powerful paradigm for scientific research</article-title><source>Innovation (Camb)</source><year>2021</year><month>10</month><day>28</day><volume>2</volume><issue>4</issue><fpage>100179</fpage><pub-id pub-id-type="doi">10.1016/j.xinn.2021.100179</pub-id><pub-id pub-id-type="medline">34877560</pub-id></nlm-citation></ref></ref-list></back></article>