<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Ment Health</journal-id><journal-id journal-id-type="publisher-id">mental</journal-id><journal-id journal-id-type="index">16</journal-id><journal-title>JMIR Mental Health</journal-title><abbrev-journal-title>JMIR Ment Health</abbrev-journal-title><issn pub-type="epub">2368-7959</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v12i1e81970</article-id><article-id pub-id-type="doi">10.2196/81970</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Physician Perspectives on the Impact of Artificial Intelligence on the Therapeutic Relationship in Mental Health Care: Qualitative Study</article-title></title-group><contrib-group><contrib contrib-type="author"><name name-style="western"><surname>Weir</surname><given-names>Isabel B</given-names></name><degrees>BA</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Stroud</surname><given-names>Austin M</given-names></name><degrees>MA</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Stout</surname><given-names>Jeremiah J</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author"><name 
name-style="western"><surname>Barry</surname><given-names>Barbara A</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff5">5</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Athreya</surname><given-names>Arjun P</given-names></name><degrees>MS, PhD</degrees><xref ref-type="aff" rid="aff6">6</xref><xref ref-type="aff" rid="aff7">7</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Bobo</surname><given-names>William V</given-names></name><degrees>MPH, MD</degrees><xref ref-type="aff" rid="aff8">8</xref></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Sharp</surname><given-names>Richard R</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib></contrib-group><aff id="aff1"><institution>Biomedical Ethics Program, Mayo Clinic</institution><addr-line>200 First Street SW</addr-line><addr-line>Rochester</addr-line><addr-line>MN</addr-line><country>United States</country></aff><aff id="aff2"><institution>Radcliffe Humanities, Faculty of Philosophy, University of Oxford</institution><addr-line>Oxford</addr-line><country>United Kingdom</country></aff><aff id="aff3"><institution>Alix School of Medicine, Mayo Clinic</institution><addr-line>Rochester</addr-line><addr-line>MN</addr-line><country>United States</country></aff><aff id="aff4"><institution>Department of Psychiatry and Behavioral Sciences, University of Washington</institution><addr-line>Seattle</addr-line><addr-line>WA</addr-line><country>United States</country></aff><aff id="aff5"><institution>Robert D. and Patricia E. 
Kern Center for the Science of Health Care Delivery, Mayo Clinic</institution><addr-line>Rochester</addr-line><addr-line>MN</addr-line><country>United States</country></aff><aff id="aff6"><institution>Department of Molecular Pharmacology and Experimental Therapeutics, Mayo Clinic</institution><addr-line>Rochester</addr-line><addr-line>MN</addr-line><country>United States</country></aff><aff id="aff7"><institution>Department of Psychiatry and Psychology, Mayo Clinic</institution><addr-line>Rochester</addr-line><addr-line>MN</addr-line><country>United States</country></aff><aff id="aff8"><institution>Department of Behavioral Sciences &#x0026; Social Medicine, College of Medicine, Florida State University</institution><addr-line>FL</addr-line><country>United States</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Torous</surname><given-names>John</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Mansoor</surname><given-names>Masab</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Ancillotti</surname><given-names>Mirko</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Richard R Sharp, PhD, Biomedical Ethics Program, Mayo Clinic, 200 First Street SW, Rochester, MN, 55905, United States, 1 507-538-6502; <email>sharp.richard@mayo.edu</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>30</day><month>12</month><year>2025</year></pub-date><volume>12</volume><elocation-id>e81970</elocation-id><history><date date-type="received"><day>06</day><month>08</month><year>2025</year></date><date date-type="rev-recd"><day>03</day><month>12</month><year>2025</year></date><date date-type="accepted"><day>04</day><month>12</month><year>2025</year></date></history><copyright-statement>&#x00A9; Isabel B Weir, 
Austin M Stroud, Jeremiah J Stout, Barbara A Barry, Arjun P Athreya, William V Bobo, Richard R Sharp. Originally published in JMIR Mental Health (<ext-link ext-link-type="uri" xlink:href="https://mental.jmir.org">https://mental.jmir.org</ext-link>), 30.12.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Mental Health, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://mental.jmir.org/">https://mental.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://mental.jmir.org/2025/1/e81970"/><abstract><sec><title>Background</title><p>The therapeutic relationship is a professional partnership between clinicians and patients that supports open communication and clinical decision-making. This relationship is critical to the delivery of effective mental health care. 
The integration of artificial intelligence (AI) into mental health care has the potential to support accessibility and personalized care; however, little is known about how AI might affect the dynamics of the therapeutic relationship.</p></sec><sec><title>Objective</title><p>This study aimed to ascertain how physicians anticipate AI tools will impact the therapeutic relationship in mental health care.</p></sec><sec sec-type="methods"><title>Methods</title><p>We conducted 42 in-depth interviews with psychiatrists and family medicine practitioners to investigate physician perceptions regarding the impact of AI on mental health care.</p></sec><sec sec-type="results"><title>Results</title><p>Physicians identified several disruptions from AI use, noting that these tools could impact the dyad of the patient-physician relationship in ways that are both positive and negative. The main themes that emerged included potential disruptions to the therapeutic relationship, shifts in shared decision-making dynamics, and the importance of transparent AI use. Participants suggested that AI tools could create efficiencies that allow for relationship building as well as help avoid issues with miscommunication during psychotherapeutic interactions. However, they also expressed concerns that AI tools might not adequately capture aspects of the therapeutic relationship, such as empathy, that are vital to mental health care. Physicians also raised issues related to the impact that AI tools will have on maintaining relationships with patients.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>As AI applications become increasingly integrated into mental health care, it is crucial to assess how this integration may support or disrupt the therapeutic relationship. 
Physician acceptance of emerging AI tools may be highly dependent on how well the human elements of mental health care are preserved.</p></sec></abstract><kwd-group><kwd>artificial intelligence</kwd><kwd>AI</kwd><kwd>mental health</kwd><kwd>therapeutic relationship</kwd><kwd>patient-physician relationship</kwd><kwd>communication</kwd><kwd>interviews</kwd><kwd>family medicine</kwd><kwd>psychiatry</kwd><kwd>qualitative</kwd><kwd>providers</kwd><kwd>physicians</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>The therapeutic relationship encompasses the goals, tasks, and connection of the patient-physician dyad and has been cited by patients as integral to what defines high-quality mental health care and medical care, more broadly [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref3">3</xref>]. In discussing the therapeutic relationship, we refer to the overarching interpersonal dynamics between physicians and patients. This is inclusive of (although not limited to) the <italic>therapeutic alliance</italic>, a concept emphasized in mental health contexts that characterizes the collaborative partnership focused on achieving treatment goals [<xref ref-type="bibr" rid="ref2">2</xref>]. In this paper, we use the term therapeutic relationship with the understanding that it encompasses the therapeutic alliance. In mental health care, the therapeutic relationship is characterized by empathy, cooperation, and support and is associated with increased medication compliance and positive outcomes [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref4">4</xref>]. Valued by both patients and clinicians, the therapeutic relationship is central to some professional codes of conduct [<xref ref-type="bibr" rid="ref1">1</xref>]. 
It is the professional gold standard for patient-physician interaction, with documented advantages over other models in the establishment of patient trust in care providers, care satisfaction, treatment buy-in, and adherence [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref6">6</xref>].</p><p>The integration of artificial intelligence (AI) into the care of mental health conditions has the potential to expand the therapeutic relationship, but with ethically complex implications [<xref ref-type="bibr" rid="ref7">7</xref>-<xref ref-type="bibr" rid="ref10">10</xref>]. While advances in digital health technology are predicted to increase the quality of care and strengthen personalized psychiatry [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref12">12</xref>], AI advances may disrupt patient-physician interactions that have been conventionally viewed as building blocks of a therapeutic relationship [<xref ref-type="bibr" rid="ref13">13</xref>]. For instance, AI-enabled chatbots aimed at providing behavioral support can potentially expand access to care but also create potential risks related to therapeutic misconception and inauthentic therapeutic relationships [<xref ref-type="bibr" rid="ref14">14</xref>-<xref ref-type="bibr" rid="ref16">16</xref>]. Similar concerns have been identified in patient-centered investigations, suggesting preferences for human involvement in therapy-based interactions [<xref ref-type="bibr" rid="ref17">17</xref>] and a critical role for authenticity in health care contexts [<xref ref-type="bibr" rid="ref18">18</xref>]. Furthermore, access to AI-supported clinical recommendations&#x2014;by both physicians and patients&#x2014;can influence communication with the potential to support and limit shared decision-making [<xref ref-type="bibr" rid="ref19">19</xref>]. 
As AI tools are increasingly being developed and studied in the context of psychiatric care [<xref ref-type="bibr" rid="ref20">20</xref>], their role in clinical assessment and emotional counseling could potentially impact the dynamics of patient-physician interactions [<xref ref-type="bibr" rid="ref7">7</xref>]. For all their conveniences, AI tools may disrupt relationships between physicians and patients [<xref ref-type="bibr" rid="ref21">21</xref>].</p><p>Several studies have sought to explore clinician perspectives regarding the use of AI and digital technology in mental health care. These studies report that many clinicians view empathetic care as a uniquely human skill that digital technologies are incapable of replicating [<xref ref-type="bibr" rid="ref22">22</xref>-<xref ref-type="bibr" rid="ref25">25</xref>]. In a therapeutic context, many clinicians believe that these technologies cannot engage in a personal relationship with patients and that removal of this relationship would omit the most effective aspect of mental health care [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref25">25</xref>]. Clinicians also note the importance of behavioral cues and nonverbal communication when assessing patients [<xref ref-type="bibr" rid="ref24">24</xref>-<xref ref-type="bibr" rid="ref26">26</xref>]. The use of AI tools may decrease patient trust, posing a barrier to relationship development and high-quality patient assessment [<xref ref-type="bibr" rid="ref24">24</xref>]. However, others acknowledge that AI tools might facilitate better communication with patients who have not experienced adequate rapport with providers in the past [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref24">24</xref>]. 
While this previous work has charted many benefits and concerns associated with the adoption of AI tools for mental health care, direct investigation of provider perspectives on the anticipated impact that AI tools will have on the dynamics of therapeutic relationships is limited.</p><p>We reported findings on physician perspectives regarding the impact of AI tools on the patient-physician relationship, including its emotional impacts, influences on shared decision-making, and changes to communication. Our findings highlight physician perspectives on the ethical introduction of AI into health care, including key features physicians identify as relevant to supporting authentic therapeutic relationships in mental health care.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Overview</title><p>This paper draws upon an interview dataset that has been described previously [<xref ref-type="bibr" rid="ref27">27</xref>]. Our prior studies focused on the perceived benefits and risks of AI tools in psychiatric medicine, identifying key factors that influence physician acceptance of these technologies. In this paper, we examined a different set of topics, focusing on the impact of these AI tools on the therapeutic relationship. In the following sections, we provide a summary of study methods. Readers may wish to consult our previous paper for additional methodological details [<xref ref-type="bibr" rid="ref27">27</xref>].</p></sec><sec id="s2-2"><title>Recruitment</title><p>Physicians in family medicine and psychiatry specialties with experience treating major depressive disorder were invited to participate in our study via email invitations. These physicians were recruited from a single academic health system in the United States. 
Family medicine practitioners were included in addition to psychiatrists due to an expanded role for providing mental health care observed in the specialty [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>].</p></sec><sec id="s2-3"><title>Data Collection</title><p>In-depth interviews exploring the adoption of AI into mental health care were conducted via Zoom (Zoom Communications). Interviews leveraged a case-based design to prompt participant discussion and had an average duration of 37 minutes. Three study team members participated in interviews (Susan H Curtis, AMS, and JJS), with 2 members present per interview.</p><p>Interviews began with general questions that explored participants&#x2019; perceptions of AI. To make the discussion more concrete, 2 to 3 hypothetical case scenarios were presented to participants. These cases highlighted different uses of AI based on tools currently marketed or under research. Cases illustrated the following AI uses: (1) a physician-facing tool that assisted a physician in prescribing medications based on patient pharmacogenomic information, (2) a patient-facing chatbot that provided cognitive behavioral therapy, (3) a physician-facing tool for differential diagnosis and associated disease risk scores, and (4) a physician-facing tool that identified patients at risk for suicide based on population health characteristics. Time limitations prevented interviewers from covering all cases in every interview. Interviewers selected cases to ensure that each was represented across the set of interviews. After presenting a case to the participant, the primary interviewer asked the participant to describe their general reactions to the presented AI device and then used structured and unstructured prompts to inquire about more specific issues of AI application. 
Some of these issues pertained to disclosure of AI device use, AI&#x2019;s effect on the patient-physician relationship, AI&#x2019;s performance relative to physicians, and whether AI impacted physician examination of patients. For instance, interviewers asked questions such as &#x201C;How do you foresee this tool affecting the physician-patient relationship?&#x201D; and &#x201C;Do you have any concerns regarding the interactions between patients and chatbots?&#x201D; The secondary interviewer took notes and asked probing questions as needed.</p><p>All interviews were audio recorded and transcribed by a professional transcription service. Transcripts were deidentified by deleting identifying nouns and role descriptions. Transcripts were reviewed by the study team to identify any errors by comparing them with the original audio recordings.</p></sec><sec id="s2-4"><title>Data Analysis</title><p>Transcripts were qualitatively coded by at least two members of the study team following an inductive approach [<xref ref-type="bibr" rid="ref27">27</xref>]. This approach was informed by grounded theory [<xref ref-type="bibr" rid="ref30">30</xref>], although it was agnostic to the goal of theory development. While grounded theory traditionally aims to generate an explanatory theory, our modified approach aimed to leverage the systematic coding procedures central to grounded theory to structure our analysis and identify themes. One primary coder (IBW) and 1 of 2 secondary coders (SHC and AMS) each independently coded transcripts using NVivo (Lumivero, LLC). Coders subsequently met to discuss their coding decisions, checking for intercoder consistency in codebook application and to resolve any discrepancies [<xref ref-type="bibr" rid="ref31">31</xref>]. 
The codebook was revised throughout the coding process, with iterative refinement of coding definitions as insights emerged from data analysis [<xref ref-type="bibr" rid="ref32">32</xref>].</p></sec><sec id="s2-5"><title>Ethical Considerations</title><p>The study was approved by the Mayo Clinic Institutional Review Board (protocol 21&#x2010;006191). All participants provided oral consent in accordance with institutional review board guidance. Study data presented in this manuscript have had all personally identifying details omitted to protect participant privacy. Participants enrolled on a strictly voluntary basis and did not receive compensation for their involvement.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Overview</title><p>A total of 143 physicians were invited to participate in our study. Forty-two physicians, including 21 psychiatrists and 21 family medicine practitioners, enrolled and completed an interview. Several major themes arose concerning the dynamics of the therapeutic relationship, influences on shared decision-making, and transparency of AI tools.</p></sec><sec id="s3-2"><title>Physicians Felt That Health Care AI Could Disrupt the Therapeutic Relationship</title><p>Participants identified several ways that AI tools might impact personal interactions and disrupt the therapeutic relationship. One area of disruption stemmed from the potential of AI tools to replace interactions that would typically be handled by a clinical provider. Some physicians were particularly wary of AI tools for the purposes of psychotherapy and the potential to displace established relationships with human practitioners or limit their development. 
In terms of mental health care delivery, some physicians noted that they &#x201C;don&#x2019;t think it [AI] will replace the patient-physician relationship&#x201D; (interview 35, family medicine); however, there was some concern that patients who defer to AI tools might not receive adequate care depending on the severity of their conditions or health circumstances:</p><disp-quote><p>One potential downside, I don&#x2019;t think it necessarily would be, but [it] could be that [the] patient likes the chatbot a lot more than going to see the human being psychologist, and if that relationship with the treating clinician dries up or isn&#x2019;t there, what happens when the patient, if the patient has a spike in anxiety or becomes depressed, or has some change for the worse in clinical condition that would have otherwise been either identified by the psychologist, or the treatment plan would be adjusted if the patient were still engaged with the psychologist?</p><attrib>Interview 17, psychiatry</attrib></disp-quote><p>In addition, physicians expressed that the experience of developing and cultivating a social relationship with the physician is supportive for patients. They noted the specific value of this relationship in psychotherapeutic contexts where conversation with a human provider may be preferable:</p><disp-quote><p>I think if you start relying on like, &#x201C;Oh, well, the computer algorithm said I should pick this,&#x201D; it takes away some of the humanity. That is why people are benefiting from a psychiatrist, [it] is because you feel heard, and you feel like that person understands you. If it&#x2019;s just popped out, I&#x2019;m not sure that would be there.</p><attrib>Interview 28, psychiatry</attrib></disp-quote><p>Physicians also highlighted the importance of open communication and the reception of emotions, such as empathy and compassion, as aspects of the therapeutic relationship. 
They noted the importance of being able to respond to nonverbal cues and physically sit with a person who is in pain. &#x201C;There&#x2019;s something about knowing your patients very well [...] being on a relationship level with them that I think is very hard to replace&#x201D; (interview 04, family medicine). Physicians noted that often their patients confide in them because of this carefully cultivated therapeutic relationship and viewed it as essential to health care delivery. Some physicians viewed AI tools as limited in their capacity to capture these emotional aspects:</p><disp-quote><p>I think personal connection is invaluable. [...] Again, eye contact, &#x201C;I hear you,&#x201D; that emotional sense there, which AI can&#x2019;t necessarily do. There probably is some script that says, &#x201C;I can tell this is difficult for you to do,&#x201D; or just empathy-type thing[s], but just you know that it&#x2019;s not a person.</p><attrib>Interview 35, family medicine</attrib></disp-quote><p>Physicians also felt that the introduction of AI tools might create positive changes that could be supportive of the therapeutic relationship overall. For instance, they noted the potential of AI tools to streamline clinical tasks that could support increased time for patient interaction. &#x201C;If it [AI tool] helped [...] facilitate the differential diagnosis, and really gave me more time to maybe create a relationship with the person&#x201D; (interview 20, psychiatry). In addition, physicians saw ways using AI could be perceived as augmenting physician capabilities and would showcase to patients the effort being put into their treatment. Under this view, physicians saw AI tools as supplementing rather than replacing their role in health care delivery:</p><disp-quote><p>I would hope that it would improve the relationship because it&#x2019;s kind of like giving them, like with the chatbot, more tools to help [...] 
I would think it shows that you are building on knowledge that you have based on what&#x2019;s going in the field and applying it to their treatment which I think it&#x2019;s always a good thing.</p><attrib>Interview 40, psychiatry</attrib></disp-quote><p>Physicians also noted that patient interactions with AI tools might help to avoid miscommunication and countertransference that can occur during a conversation between the physician and patient. &#x201C;There&#x2019;s a lot of transference, counter transference going on between the patients and the therapist. In this [AI chatbot] format, I see that as a non-issue&#x201D; (interview 12, psychiatry). They expressed that adopting AI tools might facilitate better management of these issues in therapeutic contexts compared to conventional patient-physician interactions.</p></sec><sec id="s3-3"><title>Physicians Predicted That AI Would Impact Shared Decision-Making</title><p>Participants anticipated that the adoption of AI tools would influence the dynamics of shared decision-making. Physicians conceptualized interactions with AI as an extension of their clinical tasks, adding evidence-based validation to their decisions, and even potentially enhancing the patient-physician relationship by providing additional support. &#x201C;I think it [AI tool] could be used to open up conversations on things&#x201D; (interview 20, psychiatry). Participants anticipated that they would continue to value their own clinical judgment in shared decision-making conversations and suggested that AI recommendations would need to receive a physician&#x2019;s &#x201C;stamp of approval&#x201D;:</p><disp-quote><p>It might be different if I&#x2019;m totally cut out of the process, but I&#x2019;m still the&#x2014;as the clinician, I&#x2019;m still the critical processing node. 
This AI information is coming to me, and then I&#x2019;m making the decision based on this additional information.</p><attrib>Interview 06, family medicine</attrib></disp-quote><p>However, some participants felt that AI might begin to limit physician judgment by determining when physicians needed to be involved. Other physicians worried about the reductionist potential of health care AI being at odds with the aims of medicine. &#x201C;The art of medicine is not&#x2014;I think, incorporated in AI. The confounder that both the doctor and the patient represent&#x201D; (interview 13, psychiatry). Participants expressed concerns about being replaced as AI begins to guide decision-making, repositioning physicians in an ancillary role. &#x201C;It [AI clinical decision support tool] seems like it&#x2019;s infringing on my years of training and stuff like that, and experience. It&#x2019;s like, well, no. I disagree with that&#x201D; (interview 16, family medicine).</p><p>Physicians felt that AI might also increase patient engagement in the shared decision-making process. Participants expected to respect patient choices but had concerns about patient interpretations of AI outputs and favoring them over a physician&#x2019;s clinical expertise. For example, some participants compared patient interactions with AI to patients&#x2019; trust in &#x201C;Dr. Google&#x201D; (interview 23, family medicine) despite the limitations of patients&#x2019; medical knowledge. Generally, participants were willing to attempt to broaden a patient&#x2019;s preconceptions of their diagnosis and the best treatment plan through discussion:</p><disp-quote><p>It&#x2019;d be interesting if there&#x2019;s a disagreement like the patient says, &#x201C;I agree with the machine and not you.&#x201D; That would be interesting. I think that&#x2019;d be a good discussion to have. 
It&#x2019;s probably not unusual that patients will come in with certain ideas on things.</p><attrib>Interview 20, psychiatry</attrib></disp-quote><p>However, some participants anticipated that patients might be overly deferential to AI-supported physician recommendations. For some mental health patients, the physician&#x2019;s office might already be an uncomfortable place that they have prepared for. They may enter with fixed opinions based on their own research. Patients may also experience cognitive entrenchment, causing them to default to information provided by AI tools and pull back from shared decision-making:</p><disp-quote><p>It&#x2019;s a real interesting thing &#x2019;cause in the right setting people accept what machines tell them. They come in convinced they have things that they may not, but they&#x2019;ve got the control to access it and decide what to do there. Coming into the doctor&#x2019;s office, they don&#x2019;t have the control, and my instinct is they&#x2019;d still want human intervention.</p><attrib>Interview 18, psychiatry</attrib></disp-quote><p>Physicians presented an idealized way to situate AI within shared decision-making. They noted that the primary interaction is between the physician and the patient, who together should decide whether to incorporate AI tools and recommendations into clinical discussions:</p><disp-quote><p>It&#x2019;s a partnership. It&#x2019;s another tool. That&#x2019;s why you say, &#x201C;Okay. Yeah.&#x201D; That&#x2019;s how I would treat it. It&#x2019;s kind of like we talked about if the computer&#x2019;s the third person in the room, the third thing. It&#x2019;s me and the patient and we bring the computer.</p><attrib>Interview 15, family medicine</attrib></disp-quote><p>As an example, physicians expressed that in educating patients, they could incorporate AI recommendations while explaining their clinical judgment. 
In addition, physicians could encourage patients to express their reactions to treatment options and express their preferences based on the information provided by AI tools, along with physicians&#x2019; clinical recommendations:</p><disp-quote><p>I use that as a tool to educate my patient. Explain to them what AI is, saying, you know, &#x201C;You have this disease, we have this wonderful technology that does this kind of super-fast calculation or database search, in preparation for recommendations, and this is what it&#x2019;s recommending.&#x201D; Then starting the conversation, &#x201C;I agree with this recommendation,&#x201D; or, &#x201C;I disagree and this is why&#x201D;&#x2014;yeah, I would use it.</p><attrib>Interview 23, family medicine</attrib></disp-quote></sec><sec id="s3-4"><title>Physicians Felt Transparency Around AI Usage Was Key to the Therapeutic Relationship</title><p>Physicians expressed the importance of disclosing the use of AI tools with patients, highlighting the role of transparency in cultivating patient trust as a key part of the therapeutic relationship. &#x201C;I think the important ingredient there is transparency between the patient and the provider and an agreed-upon way that this [AI tool] is going to be used&#x201D; (interview 37, psychiatry). Physicians expected that patients would trust their discretion regarding how to incorporate AI clinical decision support recommendations, along with when to shift away from such recommendations. &#x201C;Again, just like any other tool that we use, I don&#x2019;t think it&#x2019;s going to be unusual for them if they trust us to take care of them and to use the tools that we have&#x201D; (interview 42, family medicine). 
Physicians felt that this trust was largely established through open communication with patients and respect for patient agreement in the use of tools and their outputs:</p><disp-quote><p>Yeah, I think we have to be open and honest with our patients in everything that we do [...] I don&#x2019;t think you&#x2019;d have any reason to hide that information from them. Just tell them exactly what you&#x2019;re doing, and that you&#x2019;re using this tool to confirm what your suspicions are.</p><attrib>Interview 08, family medicine</attrib></disp-quote><p>Physicians also noted they would determine when and how to disclose their use of AI tools in clinical decision-making based on the depth of their prior established relationship with their patients. In addition to physician-facing AI tools, participants acknowledged that making patients aware of AI-supported decision-making or expanding interactions to patient-facing AI might be daunting for patients struggling with certain mental health conditions. Participants noted that the rapport-building process allows physicians to determine whether disclosing and recommending the use of AI is appropriate and helpful in mental health care:</p><disp-quote><p>Maybe even in the process of going through this with a patient, there may be a good approach, like, &#x201C;When&#x2019;s the right time to suggest this, once we have an established rapport and relationship?&#x201D; and, &#x201C;I think you&#x2019;re making gains, and I think this would help you more.&#x201D; I find maybe that might be where it would be helpful.</p><attrib>Interview 35, family medicine</attrib></disp-quote><p>Physicians anticipated that they would tailor their disclosure of AI use based on the psychiatric conditions of their patients. For instance, some participants stated that they might be less inclined to use the term &#x201C;artificial intelligence&#x201D; with patients who have a history of paranoia. 
They stated that knowledge of a patient&#x2019;s medical history would be useful to gauge the framing of AI disclosure and to guide the extent to which physicians explain the technical aspects of AI to patients (ie, presentation of AI as a digital tool vs an intelligent algorithm) to avoid any potential confusion. &#x201C;Artificial intelligence, I think, is just one of those words [...] that might just cause some confusion&#x201D; (interview 04, family medicine).</p><p>Some physicians highlighted the importance of disclosure to mitigate potential discomfort that could be experienced by patients. &#x201C;I think, if I was a patient, I may feel a little taken aback by that [AI tool] but as long as we&#x2019;re disclosing, I think, I&#x2019;d be comfortable incorporating it&#x201D; (interview 07, family medicine). Despite differing views on how to disclose AI tools, physicians still saw disclosure as important to cultivating patient trust. Furthermore, participants saw transparency and trust as a strong predictor of patient acceptance of AI.</p></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>There has been a great deal of excitement as well as skepticism concerning the adoption of AI tools in mental health care. Our study aimed to characterize physician perspectives on the impact of health care AI on the patient-physician relationship. Some of our study findings corroborate the broader literature, which examines how AI or digital technology might alter patient-physician interactions within the constraints of the therapeutic dynamic [<xref ref-type="bibr" rid="ref22">22</xref>-<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref33">33</xref>]. 
Furthermore, our results inform an emerging literature on the potential for a &#x201C;digital therapeutic alliance,&#x201D; which refers to the potential for a collaborative relationship between a patient and a digital mental health intervention [<xref ref-type="bibr" rid="ref34">34</xref>]. While our results highlight physician skepticism regarding the capacity of AI tools to support authentic therapeutic relationships, they suggest several ways in which traditional therapeutic relationships might be supported by these tools. In addition, our findings point to several key considerations for physicians and patients as they choose to integrate AI into mental health care.</p><p>Physicians place a great deal of value on the therapeutic relationship and are sensitive to certain disruptions that AI tools may cause. The therapeutic relationship has previously been examined in the context of digital technology and AI advances, with rapport building as a critical element of mental health care [<xref ref-type="bibr" rid="ref26">26</xref>]. For instance, clinicians often give patients the freedom to choose engagement in specific rapport-building activities, promoting more personalized care and greater insight into factors that affect patient mental health concerns [<xref ref-type="bibr" rid="ref26">26</xref>]. Such perspectives align with studies in which clinicians agreed that expression of empathy is a uniquely human skill [<xref ref-type="bibr" rid="ref23">23</xref>]. An immediate barrier for therapy-based AI tools may be limitations on their ability to display emotional intelligence and moral capacity comparable to that of human-based care, leaving them insufficient for robust therapeutic relationships [<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]. 
As a result, patients who value emotional connection may be particularly vulnerable to illusory representations of these features in AI tools and might find AI-based interactions limiting to their mental health care [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref21">21</xref>]. In addition, cognitive behavioral therapy&#x2013;based digital tools may narrow the diversity of therapeutic activities, which can act as platforms for relationship building [<xref ref-type="bibr" rid="ref26">26</xref>].</p><p>Moreover, physicians in our study observed empathy and emotional support as fundamental in data collection for diagnosis and treatment as well as integral to therapeutic relationships. Arguably, human skills are necessary for maximizing the type and quality of data collected [<xref ref-type="bibr" rid="ref25">25</xref>]. Emotional openness may increase a patient&#x2019;s expression of nonverbal behavioral cues that can be registered and analyzed by the clinician. As already seen in research on patient interviews using electronic medical record templates, some technology-directed interactions lack the emotional subtext of a natural conversation and may be the cause of missing data points that characterize patient symptomology [<xref ref-type="bibr" rid="ref37">37</xref>].</p><p>Our results suggest that applications of AI in mental health care might be best received when they serve as a supplement rather than a replacement for physician input. Promotion of physician oversight over AI supports previous views that digital technology should not replace face-to-face care, that patients may perceive noncollaborative digital care as inferior, and that patients are more likely to accept clinician-driven care that places digital technology as an adjunct [<xref ref-type="bibr" rid="ref25">25</xref>]. 
Participants noted several scenarios where AI tools could be helpful, such as freeing up time that can be re-allocated to patient interactions and supplementing clinical judgment. In this augmentative capacity, AI tools may enhance rather than undermine aspects of the therapeutic relationship.</p><p>Empathetic communication and transparency about therapeutic goals between physicians and patients were also key features in deciding to integrate AI tools. Strong rapport and mutual trust between physicians and patients not only promote more targeted patient care but also provide a platform for physician disclosure of AI use that can be tailored to patient mental health conditions. Our results suggest that, particularly in mental health contexts, there may be some additional consideration for disclosing the use of AI tools to patients. Physicians felt this was a decision best left to their discretion based on a gauge of their relationships with patients. However, lack of transparency in AI systems can cause physicians to inadequately disclose AI tools to their patients [<xref ref-type="bibr" rid="ref38">38</xref>], which risks disrupting physician-to-patient communication.</p><p>Finally, intentional positioning of AI tools within patient-physician relationships may be necessary to minimize negative influences on shared decision-making. In prior work, clinicians have expressed that patient opinions may indeed support the best treatment plan and that patients value the freedom to endorse, question, or dispute the use of AI tools and subsequent treatment recommendations [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref39">39</xref>]. Physicians should be prepared to respectfully broaden patients&#x2019; perspectives of their diagnosis, given potential AI-supported clinical recommendations and interactions. 
In addition, physicians and patients might collaboratively address how AI tools could be positioned in care decisions such that neither party feels undercut by the role played by these technologies. Ultimately, AI is a tool that, similar to other types of medical tests, is subject to some anticipated level of error. However, such errors may be especially difficult to trace when using AI tools. Input data may undergo multiple hidden transformations before an output is generated. Given this potential and the perspectives shared by our study participants, AI-based clinical tools are best viewed as companions to clinical judgment, and not principal drivers of care decisions.</p></sec><sec id="s4-2"><title>Limitations</title><p>Our study has several limitations. First, participants were presented with different diagnostic and treatment-related AI devices as clinical cases; participants did not have the opportunity to experience using these technologies in clinical practice. Engagement with actual AI devices may have influenced participant perspectives and helped participants to more tangibly conceptualize how AI might affect the therapeutic dynamic. Second, we did not investigate how AI would improve or worsen the therapeutic relationship in a broader range of care settings beyond mental health care. Our study focused primarily on the opinions of physicians working at an academic medical center, which does not capture the full range of settings for mental health care delivery. Third, we did not collect demographic information from participants, which limits our ability to draw inferences based on participant characteristics. Finally, our study focused only on physicians and their perspectives on the therapeutic relationship. 
As a result, our findings represent only one side of a 2-person dynamic.</p></sec><sec id="s4-3"><title>Future Directions</title><p>Future research should look to gather perspectives from a broader array of stakeholders, including therapists, nurses, social workers, and patients, while grounding opinions in intervention-based studies. Investigation of physician perspectives on how physical AI devices impact the therapeutic relationship might better guide when and how certain AI devices should be integrated. Interventions could range from AI-driven differential diagnostic technologies inputted into electronic health record systems to behavioral health chatbots that can be downloaded on patients&#x2019; mobile devices. These studies could have a focus on access in disadvantaged regions, weighing how psychiatrists and family medicine practitioners aided by AI might be able to serve behavioral health without significant compromise to the therapeutic relationship. Additional research might focus on establishing best practices for clinicians, developers, and AI adopters that support the responsible use of AI tools. For instance, Delphi studies might be used to define optimal strategies for integrating AI tools and managing conflicts in clinical assessment. Similarly, user experience research might support the refinement of AI tool interfaces.</p><p>Our results suggest that future studies should examine patient and mental health professional views concerning the impact of AI tools on the therapeutic relationship. To fully understand this impact, a broader range of mental health professionals and other stakeholders should be included in these studies. 
Longitudinal engagement with these stakeholders will be critical to the successful integration of AI tools into mental health care.</p></sec><sec id="s4-4"><title>Conclusions</title><p>Without adequate consideration of the impact that AI will have on aspects of the therapeutic relationship, physicians may face unintended consequences from the adoption of AI tools and a disruption to relationships with patients. Our study supports a more comprehensive understanding of these issues by detailing some of the ways physicians anticipate AI will shift dynamics in the therapeutic relationship. As ongoing research and evaluation of AI tools in mental health care aim to support responsible clinical uses of these technologies, it is crucial that stakeholders assess their potential impact on the therapeutic relationship.</p></sec></sec></body><back><ack><p>The authors would like to thank Joel E Pacyna for his involvement in sampling, recruitment, and project conceptualization, as well as Susan H Curtis for her involvement in data collection and analysis as well as reviewing an early draft of the manuscript. Finally, the authors would like to thank Journey L Wise for her feedback on an early draft of the manuscript.</p></ack><notes><sec><title>Funding</title><p>This study was funded by the National Science Foundation (award 2041339). Any opinions, findings, and conclusions or recommendations expressed in this material are those of the authors and do not necessarily reflect the views of the National Science Foundation. 
Additional funding was provided by the Mayo Clinic Center for Individualized Medicine.</p></sec><sec><title>Data Availability</title><p>The datasets generated or analyzed during this study are available from the corresponding author on reasonable request.</p></sec></notes><fn-group><fn fn-type="con"><p>Conceptualization: RRS (lead), APA (supporting), WVB (supporting)</p><p>Formal analysis: IBW (lead), AMS (supporting), BAB (supporting), RRS (supporting)</p><p>Funding acquisition: APA (equal), WVB (equal), RRS (equal)</p><p>Investigation: AMS (equal), JJS (equal)</p><p>Methodology: RRS</p><p>Project administration: RRS</p><p>Resources: RRS</p><p>Supervision: RRS</p><p>Writing &#x2013; original draft: IBW (lead), AMS (supporting), RRS (supporting)</p><p>Writing &#x2013; review &#x0026; editing: IBW (lead), AMS (supporting), JJS (supporting), BAB (supporting), APA (supporting), WVB (supporting), RRS (supporting)</p></fn><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hartley</surname><given-names>S</given-names> </name><name name-style="western"><surname>Raphael</surname><given-names>J</given-names> </name><name name-style="western"><surname>Lovell</surname><given-names>K</given-names> </name><name name-style="western"><surname>Berry</surname><given-names>K</given-names> </name></person-group><article-title>Effective nurse-patient relationships in mental health care: a systematic review of interventions to improve the therapeutic alliance</article-title><source>Int J Nurs Stud</source><year>2020</year><month>02</month><volume>102</volume><fpage>103490</fpage><pub-id 
pub-id-type="doi">10.1016/j.ijnurstu.2019.103490</pub-id><pub-id pub-id-type="medline">31862531</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bordin</surname><given-names>ES</given-names> </name></person-group><article-title>The generalizability of the psychoanalytic concept of the working alliance</article-title><source>Psychother: Theory Res Prac</source><year>1979</year><volume>16</volume><issue>3</issue><fpage>252</fpage><lpage>260</lpage><pub-id pub-id-type="doi">10.1037/h0085885</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Johansson</surname><given-names>H</given-names> </name><name name-style="western"><surname>Eklund</surname><given-names>M</given-names> </name></person-group><article-title>Patients&#x2019; opinion on what constitutes good psychiatric care</article-title><source>Scand J Caring Sci</source><year>2003</year><month>12</month><volume>17</volume><issue>4</issue><fpage>339</fpage><lpage>346</lpage><pub-id pub-id-type="doi">10.1046/j.0283-9318.2003.00233.x</pub-id><pub-id pub-id-type="medline">14629636</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Totura</surname><given-names>CMW</given-names> </name><name name-style="western"><surname>Fields</surname><given-names>SA</given-names> </name><name name-style="western"><surname>Karver</surname><given-names>MS</given-names> </name></person-group><article-title>The role of the therapeutic relationship in psychopharmacological treatment outcomes: a meta-analytic review</article-title><source>Psychiatr 
Serv</source><year>2018</year><month>01</month><day>1</day><volume>69</volume><issue>1</issue><fpage>41</fpage><lpage>47</lpage><pub-id pub-id-type="doi">10.1176/appi.ps.201700114</pub-id><pub-id pub-id-type="medline">28945182</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hauser</surname><given-names>K</given-names> </name><name name-style="western"><surname>Koerfer</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kuhr</surname><given-names>K</given-names> </name><name name-style="western"><surname>Albus</surname><given-names>C</given-names> </name><name name-style="western"><surname>Herzig</surname><given-names>S</given-names> </name><name name-style="western"><surname>Matthes</surname><given-names>J</given-names> </name></person-group><article-title>Outcome-relevant effects of shared decision making</article-title><source>Dtsch Arztebl Int</source><year>2015</year><month>10</month><day>2</day><volume>112</volume><issue>40</issue><fpage>665</fpage><lpage>671</lpage><pub-id pub-id-type="doi">10.3238/arztebl.2015.0665</pub-id><pub-id pub-id-type="medline">26517594</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Joosten</surname><given-names>EAG</given-names> </name><name name-style="western"><surname>DeFuentes-Merillas</surname><given-names>L</given-names> </name><name name-style="western"><surname>de Weert</surname><given-names>GH</given-names> </name><name name-style="western"><surname>Sensky</surname><given-names>T</given-names> </name><name name-style="western"><surname>van der Staak</surname><given-names>CPF</given-names> </name><name name-style="western"><surname>de Jong</surname><given-names>CAJ</given-names> </name></person-group><article-title>Systematic review of the 
effects of shared decision-making on patient satisfaction, treatment adherence and health status</article-title><source>Psychother Psychosom</source><year>2008</year><volume>77</volume><issue>4</issue><fpage>219</fpage><lpage>226</lpage><pub-id pub-id-type="doi">10.1159/000126073</pub-id><pub-id pub-id-type="medline">18418028</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Luxton</surname><given-names>DD</given-names> </name></person-group><article-title>Recommendations for the ethical use and design of artificial intelligent care providers</article-title><source>Artif Intell Med</source><year>2014</year><month>09</month><volume>62</volume><issue>1</issue><fpage>1</fpage><lpage>10</lpage><pub-id pub-id-type="doi">10.1016/j.artmed.2014.06.004</pub-id><pub-id pub-id-type="medline">25059820</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McCradden</surname><given-names>M</given-names> </name><name name-style="western"><surname>Hui</surname><given-names>K</given-names> </name><name name-style="western"><surname>Buchman</surname><given-names>DZ</given-names> </name></person-group><article-title>Evidence, ethics and the promise of artificial intelligence in psychiatry</article-title><source>J Med Ethics</source><year>2023</year><month>08</month><volume>49</volume><issue>8</issue><fpage>573</fpage><lpage>579</lpage><pub-id pub-id-type="doi">10.1136/jme-2022-108447</pub-id><pub-id pub-id-type="medline">36581457</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Terra</surname><given-names>M</given-names> </name><name name-style="western"><surname>Baklola</surname><given-names>M</given-names> 
</name><name name-style="western"><surname>Ali</surname><given-names>S</given-names> </name><name name-style="western"><surname>El-Bastawisy</surname><given-names>K</given-names> </name></person-group><article-title>Opportunities, applications, challenges and ethical implications of artificial intelligence in psychiatry: a narrative review</article-title><source>Egypt J Neurol Psychiatry Neurosurg</source><year>2023</year><month>06</month><day>20</day><volume>59</volume><issue>1</issue><fpage>80</fpage><pub-id pub-id-type="doi">10.1186/s41983-023-00681-z</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fisher</surname><given-names>CE</given-names> </name></person-group><article-title>The real ethical issues with AI for clinical psychiatry</article-title><source>Int Rev Psychiatry</source><year>2025</year><month>02</month><volume>37</volume><issue>1</issue><fpage>14</fpage><lpage>20</lpage><pub-id pub-id-type="doi">10.1080/09540261.2024.2376575</pub-id><pub-id pub-id-type="medline">40035378</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ibrahim</surname><given-names>MS</given-names> </name><name name-style="western"><surname>Mohamed Yusoff</surname><given-names>H</given-names> </name><name name-style="western"><surname>Abu Bakar</surname><given-names>YI</given-names> </name><name name-style="western"><surname>Thwe Aung</surname><given-names>MM</given-names> </name><name name-style="western"><surname>Abas</surname><given-names>MI</given-names> </name><name name-style="western"><surname>Ramli</surname><given-names>RA</given-names> </name></person-group><article-title>Digital health for quality healthcare: a systematic mapping of review studies</article-title><source>Digit 
Health</source><year>2022</year><volume>8</volume><fpage>20552076221085810</fpage><pub-id pub-id-type="doi">10.1177/20552076221085810</pub-id><pub-id pub-id-type="medline">35340904</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bhugra</surname><given-names>D</given-names> </name><name name-style="western"><surname>Tasman</surname><given-names>A</given-names> </name><name name-style="western"><surname>Pathare</surname><given-names>S</given-names> </name><etal/></person-group><article-title>The WPA-lancet psychiatry commission on the future of psychiatry</article-title><source>Lancet Psychiatry</source><year>2017</year><month>10</month><volume>4</volume><issue>10</issue><fpage>775</fpage><lpage>818</lpage><pub-id pub-id-type="doi">10.1016/S2215-0366(17)30333-4</pub-id><pub-id pub-id-type="medline">28946952</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Grant</surname><given-names>CW</given-names> </name><name name-style="western"><surname>Marrero-Polanco</surname><given-names>J</given-names> </name><name name-style="western"><surname>Joyce</surname><given-names>JB</given-names> </name><etal/></person-group><article-title>Pharmacogenomic augmented machine learning in electronic health record alerts: a health system-wide usability survey of clinicians</article-title><source>Clin Transl Sci</source><year>2024</year><month>10</month><volume>17</volume><issue>10</issue><fpage>e70044</fpage><pub-id pub-id-type="doi">10.1111/cts.70044</pub-id><pub-id pub-id-type="medline">39402925</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Khawaja</surname><given-names>Z</given-names> </name><name 
name-style="western"><surname>B&#x00E9;lisle-Pipon</surname><given-names>JC</given-names> </name></person-group><article-title>Your robot therapist is not your therapist: understanding the role of AI-powered mental health chatbots</article-title><source>Front Digit Health</source><year>2023</year><volume>5</volume><fpage>1278186</fpage><pub-id pub-id-type="doi">10.3389/fdgth.2023.1278186</pub-id><pub-id pub-id-type="medline">38026836</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sedlakova</surname><given-names>J</given-names> </name><name name-style="western"><surname>Trachsel</surname><given-names>M</given-names> </name></person-group><article-title>Conversational artificial intelligence in psychotherapy: a new therapeutic tool or agent?</article-title><source>Am J Bioeth</source><year>2023</year><month>05</month><volume>23</volume><issue>5</issue><fpage>4</fpage><lpage>13</lpage><pub-id pub-id-type="doi">10.1080/15265161.2022.2048739</pub-id><pub-id pub-id-type="medline">35362368</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Meadi</surname><given-names>MR</given-names> </name><name name-style="western"><surname>Sillekens</surname><given-names>T</given-names> </name><name name-style="western"><surname>Metselaar</surname><given-names>S</given-names> </name><name name-style="western"><surname>van Balkom</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bernstein</surname><given-names>J</given-names> </name><name name-style="western"><surname>Batelaan</surname><given-names>N</given-names> </name></person-group><article-title>Exploring the ethical challenges of conversational AI in mental health care: scoping review</article-title><source>JMIR Ment 
Health</source><year>2025</year><month>02</month><day>21</day><volume>12</volume><fpage>e60432</fpage><pub-id pub-id-type="doi">10.2196/60432</pub-id><pub-id pub-id-type="medline">39983102</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lee</surname><given-names>HS</given-names> </name><name name-style="western"><surname>Wright</surname><given-names>C</given-names> </name><name name-style="western"><surname>Ferranto</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Artificial intelligence conversational agents in mental health: patients see potential, but prefer humans in the loop</article-title><source>Front Psychiatry</source><year>2024</year><volume>15</volume><fpage>1505024</fpage><pub-id pub-id-type="doi">10.3389/fpsyt.2024.1505024</pub-id><pub-id pub-id-type="medline">39957757</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Phan</surname><given-names>TA</given-names> </name><name name-style="western"><surname>Bui</surname><given-names>VD</given-names> </name></person-group><article-title>AI with a heart: how perceived authenticity and warmth shape trust in healthcare chatbots</article-title><source>J Mark Commun</source><year>2025</year><month>05</month><day>21</day><fpage>1</fpage><lpage>21</lpage><pub-id pub-id-type="doi">10.1080/13527266.2025.2508887</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Abbasgholizadeh Rahimi</surname><given-names>S</given-names> </name><name name-style="western"><surname>Cwintal</surname><given-names>M</given-names> </name><name name-style="western"><surname>Huang</surname><given-names>Y</given-names> 
</name><etal/></person-group><article-title>Application of artificial intelligence in shared decision making: scoping review</article-title><source>JMIR Med Inform</source><year>2022</year><month>08</month><day>9</day><volume>10</volume><issue>8</issue><fpage>e36199</fpage><pub-id pub-id-type="doi">10.2196/36199</pub-id><pub-id pub-id-type="medline">35943793</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Olawade</surname><given-names>DB</given-names> </name><name name-style="western"><surname>Wada</surname><given-names>OZ</given-names> </name><name name-style="western"><surname>Odetayo</surname><given-names>A</given-names> </name><name name-style="western"><surname>David-Olawade</surname><given-names>AC</given-names> </name><name name-style="western"><surname>Asaolu</surname><given-names>F</given-names> </name><name name-style="western"><surname>Eberhardt</surname><given-names>J</given-names> </name></person-group><article-title>Enhancing mental health with artificial intelligence: current trends and future prospects</article-title><source>J Med Surg Public Health</source><year>2024</year><month>08</month><volume>3</volume><fpage>100099</fpage><pub-id pub-id-type="doi">10.1016/j.glmedi.2024.100099</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fiske</surname><given-names>A</given-names> </name><name name-style="western"><surname>Henningsen</surname><given-names>P</given-names> </name><name name-style="western"><surname>Buyx</surname><given-names>A</given-names> </name></person-group><article-title>Your robot therapist will see you now: ethical implications of embodied artificial intelligence in psychiatry, psychology, and psychotherapy</article-title><source>J Med Internet 
Res</source><year>2019</year><month>05</month><day>9</day><volume>21</volume><issue>5</issue><fpage>e13216</fpage><pub-id pub-id-type="doi">10.2196/13216</pub-id><pub-id pub-id-type="medline">31094356</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Doraiswamy</surname><given-names>PM</given-names> </name><name name-style="western"><surname>Blease</surname><given-names>C</given-names> </name><name name-style="western"><surname>Bodner</surname><given-names>K</given-names> </name></person-group><article-title>Artificial intelligence and the future of psychiatry: insights from a global physician survey</article-title><source>Artif Intell Med</source><year>2020</year><month>01</month><volume>102</volume><fpage>101753</fpage><pub-id pub-id-type="doi">10.1016/j.artmed.2019.101753</pub-id><pub-id pub-id-type="medline">31980092</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Blease</surname><given-names>C</given-names> </name><name name-style="western"><surname>Kaptchuk</surname><given-names>TJ</given-names> </name><name name-style="western"><surname>Bernstein</surname><given-names>MH</given-names> </name><name name-style="western"><surname>Mandl</surname><given-names>KD</given-names> </name><name name-style="western"><surname>Halamka</surname><given-names>JD</given-names> </name><name name-style="western"><surname>DesRoches</surname><given-names>CM</given-names> </name></person-group><article-title>Artificial intelligence and the future of primary care: exploratory qualitative study of UK general practitioners&#x2019; views</article-title><source>J Med Internet Res</source><year>2019</year><month>03</month><day>20</day><volume>21</volume><issue>3</issue><fpage>e12802</fpage><pub-id 
pub-id-type="doi">10.2196/12802</pub-id><pub-id pub-id-type="medline">30892270</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Blease</surname><given-names>C</given-names> </name><name name-style="western"><surname>Locher</surname><given-names>C</given-names> </name><name name-style="western"><surname>Leon-Carlyle</surname><given-names>M</given-names> </name><name name-style="western"><surname>Doraiswamy</surname><given-names>M</given-names> </name></person-group><article-title>Artificial intelligence and the future of psychiatry: qualitative findings from a global physician survey</article-title><source>Digit Health</source><year>2020</year><volume>6</volume><fpage>2055207620968355</fpage><pub-id pub-id-type="doi">10.1177/2055207620968355</pub-id><pub-id pub-id-type="medline">33194219</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bucci</surname><given-names>S</given-names> </name><name name-style="western"><surname>Berry</surname><given-names>N</given-names> </name><name name-style="western"><surname>Morris</surname><given-names>R</given-names> </name><etal/></person-group><article-title>&#x201C;They are not hard-to-reach clients. 
We have just got hard-to-reach services.&#x201D; Staff views of digital health tools in specialist mental health services</article-title><source>Front Psychiatry</source><year>2019</year><volume>10</volume><fpage>344</fpage><pub-id pub-id-type="doi">10.3389/fpsyt.2019.00344</pub-id><pub-id pub-id-type="medline">31133906</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lattie</surname><given-names>EG</given-names> </name><name name-style="western"><surname>Nicholas</surname><given-names>J</given-names> </name><name name-style="western"><surname>Knapp</surname><given-names>AA</given-names> </name><name name-style="western"><surname>Skerl</surname><given-names>JJ</given-names> </name><name name-style="western"><surname>Kaiser</surname><given-names>SM</given-names> </name><name name-style="western"><surname>Mohr</surname><given-names>DC</given-names> </name></person-group><article-title>Opportunities for and tensions surrounding the use of technology-enabled mental health services in community mental health care</article-title><source>Adm Policy Ment Health</source><year>2020</year><month>01</month><volume>47</volume><issue>1</issue><fpage>138</fpage><lpage>149</lpage><pub-id pub-id-type="doi">10.1007/s10488-019-00979-2</pub-id><pub-id pub-id-type="medline">31535235</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Stroud</surname><given-names>AM</given-names> </name><name name-style="western"><surname>Curtis</surname><given-names>SH</given-names> </name><name name-style="western"><surname>Weir</surname><given-names>IB</given-names> </name><etal/></person-group><article-title>Physician perspectives on the potential benefits and risks of applying artificial intelligence in psychiatric medicine: qualitative 
study</article-title><source>JMIR Ment Health</source><year>2025</year><month>02</month><day>10</day><volume>12</volume><fpage>e64414</fpage><pub-id pub-id-type="doi">10.2196/64414</pub-id><pub-id pub-id-type="medline">39928397</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hager</surname><given-names>KD</given-names> </name><name name-style="western"><surname>Albee</surname><given-names>JN</given-names> </name><name name-style="western"><surname>O&#x2019;Donnell</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Impact of interprofessional care conferences across primary care and mental health organizations on family medicine resident learning</article-title><source>Fam Med</source><year>2021</year><month>04</month><volume>53</volume><issue>4</issue><fpage>289</fpage><lpage>294</lpage><pub-id pub-id-type="doi">10.22454/FamMed.2021.329815</pub-id><pub-id pub-id-type="medline">33887052</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Park</surname><given-names>LT</given-names> </name><name name-style="western"><surname>Zarate</surname><given-names>CA</given-names> </name></person-group><article-title>Depression in the primary care setting</article-title><source>N Engl J Med</source><year>2019</year><month>02</month><day>7</day><volume>380</volume><issue>6</issue><fpage>559</fpage><lpage>568</lpage><pub-id pub-id-type="doi">10.1056/NEJMcp1712493</pub-id><pub-id pub-id-type="medline">30726688</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Corbin</surname><given-names>J</given-names> </name><name 
name-style="western"><surname>Strauss</surname><given-names>A</given-names> </name></person-group><source>Basics of Qualitative Research: Techniques and Procedures for Developing Grounded Theory</source><year>2008</year><edition>3</edition><publisher-name>SAGE Publications, Inc</publisher-name><pub-id pub-id-type="doi">10.4135/9781452230153</pub-id><pub-id pub-id-type="other">978-1-4129-0644-9</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>O&#x2019;Connor</surname><given-names>C</given-names> </name><name name-style="western"><surname>Joffe</surname><given-names>H</given-names> </name></person-group><article-title>Intercoder reliability in qualitative research: debates and practical guidelines</article-title><source>Int J Qual Methods</source><year>2020</year><month>01</month><day>1</day><volume>19</volume><fpage>1609406919899220</fpage><pub-id pub-id-type="doi">10.1177/1609406919899220</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>MacQueen</surname><given-names>KM</given-names> </name><name name-style="western"><surname>McLellan</surname><given-names>E</given-names> </name><name name-style="western"><surname>Kay</surname><given-names>K</given-names> </name><name name-style="western"><surname>Milstein</surname><given-names>B</given-names> </name></person-group><article-title>Codebook development for team-based qualitative analysis</article-title><source>CAM J</source><year>1998</year><month>05</month><volume>10</volume><issue>2</issue><fpage>31</fpage><lpage>36</lpage><pub-id pub-id-type="doi">10.1177/1525822X980100020301</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Sauerbrei</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kerasidou</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lucivero</surname><given-names>F</given-names> </name><name name-style="western"><surname>Hallowell</surname><given-names>N</given-names> </name></person-group><article-title>The impact of artificial intelligence on the person-centred, doctor-patient relationship: some problems and solutions</article-title><source>BMC Med Inform Decis Mak</source><year>2023</year><month>04</month><day>20</day><volume>23</volume><issue>1</issue><fpage>73</fpage><pub-id pub-id-type="doi">10.1186/s12911-023-02162-y</pub-id><pub-id pub-id-type="medline">37081503</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Malouin-Lachance</surname><given-names>A</given-names> </name><name name-style="western"><surname>Capolupo</surname><given-names>J</given-names> </name><name name-style="western"><surname>Laplante</surname><given-names>C</given-names> </name><name name-style="western"><surname>Hudon</surname><given-names>A</given-names> </name></person-group><article-title>Does the digital therapeutic alliance exist? 
Integrative review</article-title><source>JMIR Ment Health</source><year>2025</year><month>02</month><day>7</day><volume>12</volume><fpage>e69294</fpage><pub-id pub-id-type="doi">10.2196/69294</pub-id><pub-id pub-id-type="medline">39924298</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lee</surname><given-names>EE</given-names> </name><name name-style="western"><surname>Torous</surname><given-names>J</given-names> </name><name name-style="western"><surname>De Choudhury</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Artificial intelligence for mental health care: clinical applications, barriers, facilitators, and artificial wisdom</article-title><source>Biol Psychiatry Cogn Neurosci Neuroimaging</source><year>2021</year><month>09</month><volume>6</volume><issue>9</issue><fpage>856</fpage><lpage>864</lpage><pub-id pub-id-type="doi">10.1016/j.bpsc.2021.02.001</pub-id><pub-id pub-id-type="medline">33571718</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Conitzer</surname><given-names>V</given-names> </name><name name-style="western"><surname>Sinnott-Armstrong</surname><given-names>W</given-names> </name><name name-style="western"><surname>Borg</surname><given-names>JS</given-names> </name><name name-style="western"><surname>Deng</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Kramer</surname><given-names>M</given-names> </name></person-group><article-title>Moral decision making frameworks for artificial intelligence</article-title><source>Proc AAAI Conf Artif Intell</source><year>2017</year><volume>31</volume><issue>1</issue><pub-id pub-id-type="doi">10.1609/aaai.v31i1.11140</pub-id></nlm-citation></ref><ref 
id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bauer</surname><given-names>M</given-names> </name><name name-style="western"><surname>Monteith</surname><given-names>S</given-names> </name><name name-style="western"><surname>Geddes</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Automation to optimise physician treatment of individual patients: examples in psychiatry</article-title><source>Lancet Psychiatry</source><year>2019</year><month>04</month><volume>6</volume><issue>4</issue><fpage>338</fpage><lpage>349</lpage><pub-id pub-id-type="doi">10.1016/S2215-0366(19)30041-0</pub-id><pub-id pub-id-type="medline">30904127</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lane</surname><given-names>N</given-names> </name><name name-style="western"><surname>Broome</surname><given-names>M</given-names> </name></person-group><article-title>Towards personalised predictive psychiatry in clinical practice: an ethical perspective</article-title><source>Br J Psychiatry</source><year>2022</year><month>04</month><volume>220</volume><issue>4</issue><fpage>172</fpage><lpage>174</lpage><pub-id pub-id-type="doi">10.1192/bjp.2022.37</pub-id><pub-id pub-id-type="medline">35249567</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Richardson</surname><given-names>JP</given-names> </name><name name-style="western"><surname>Smith</surname><given-names>C</given-names> </name><name name-style="western"><surname>Curtis</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Patient apprehensions about the use of artificial intelligence in healthcare</article-title><source>NPJ Digit 
Med</source><year>2021</year><month>09</month><day>21</day><volume>4</volume><issue>1</issue><fpage>140</fpage><pub-id pub-id-type="doi">10.1038/s41746-021-00509-1</pub-id><pub-id pub-id-type="medline">34548621</pub-id></nlm-citation></ref></ref-list></back></article>