<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Ment Health</journal-id><journal-id journal-id-type="publisher-id">mental</journal-id><journal-id journal-id-type="index">16</journal-id><journal-title>JMIR Mental Health</journal-title><abbrev-journal-title>JMIR Ment Health</abbrev-journal-title><issn pub-type="epub">2368-7959</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v11i1e60589</article-id><article-id pub-id-type="doi">10.2196/60589</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Use of AI in Mental Health Care: Community and Mental Health Professionals Survey</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Cross</surname><given-names>Shane</given-names></name><degrees>MPsych, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Bell</surname><given-names>Imogen</given-names></name><degrees>BSc, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Nicholas</surname><given-names>Jennifer</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name 
name-style="western"><surname>Valentine</surname><given-names>Lee</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Mangelsdorf</surname><given-names>Shaminka</given-names></name><degrees>DPsych</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Baker</surname><given-names>Simon</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Titov</surname><given-names>Nick</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Alvarez-Jimenez</surname><given-names>Mario</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib></contrib-group><aff id="aff1"><institution>Orygen Digital</institution>, <addr-line>35 Poplar Rd, Parkville</addr-line><addr-line>Melbourne</addr-line>, <country>Australia</country></aff><aff id="aff2"><institution>Centre for Youth Mental Health, University of Melbourne</institution>, <addr-line>Melbourne</addr-line>, <country>Australia</country></aff><aff id="aff3"><institution>School of Psychological Sciences, Macquarie University</institution>, <addr-line>Sydney</addr-line>, <country>Australia</country></aff><aff id="aff4"><institution>MindSpot</institution>, <addr-line>Sydney</addr-line>, <country>Australia</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Torous</surname><given-names>John</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name 
name-style="western"><surname>Pulier</surname><given-names>Myron</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Velmovitsky</surname><given-names>Pedro</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Shane Cross, MPsych, PhD, Orygen Digital, 35 Poplar Rd, Parkville, Melbourne, 3052, Australia, 61 3 9966 9383; <email>shane.cross@orygen.org.au</email></corresp></author-notes><pub-date pub-type="collection"><year>2024</year></pub-date><pub-date pub-type="epub"><day>11</day><month>10</month><year>2024</year></pub-date><volume>11</volume><elocation-id>e60589</elocation-id><history><date date-type="received"><day>16</day><month>05</month><year>2024</year></date><date date-type="accepted"><day>30</day><month>07</month><year>2024</year></date></history><copyright-statement>&#x00A9; Shane Cross, Imogen Bell, Jennifer Nicholas, Lee Valentine, Shaminka Mangelsdorf, Simon Baker, Nick Titov, Mario Alvarez-Jimenez. Originally published in JMIR Mental Health (<ext-link ext-link-type="uri" xlink:href="https://mental.jmir.org">https://mental.jmir.org</ext-link>), 11.10.2024. </copyright-statement><copyright-year>2024</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Mental Health, is properly cited. 
The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://mental.jmir.org/">https://mental.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://mental.jmir.org/2024/1/e60589"/><abstract><sec><title>Background</title><p>Artificial intelligence (AI) has been increasingly recognized as a potential solution to address mental health service challenges by automating tasks and providing new forms of support.</p></sec><sec><title>Objective</title><p>This study is the first in a series which aims to estimate the current rates of AI technology use as well as perceived benefits, harms, and risks experienced by community members (CMs) and mental health professionals (MHPs).</p></sec><sec sec-type="methods"><title>Methods</title><p>This study involved 2 web-based surveys conducted in Australia. The surveys collected data on demographics, technology comfort, attitudes toward AI, specific AI use cases, and experiences of benefits and harms from AI use. Descriptive statistics were calculated, and thematic analysis of open-ended responses was conducted.</p></sec><sec sec-type="results"><title>Results</title><p>The final sample consisted of 107 CMs and 86 MHPs. General attitudes toward AI varied, with CMs reporting neutral and MHPs reporting more positive attitudes. Regarding AI usage, 28% (30/108) of CMs used AI, primarily for quick support (18/30, 60%) and as a personal therapist (14/30, 47%). Among MHPs, 43% (37/86) used AI; mostly for research (24/37, 65%) and report writing (20/37, 54%). While the majority found AI to be generally beneficial (23/30, 77% of CMs and 34/37, 92% of MHPs), specific harms and concerns were experienced by 47% (14/30) of CMs and 51% (19/37) of MHPs. 
There was an equal mix of positive and negative sentiment toward the future of AI in mental health care in open feedback.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Commercial AI tools are increasingly being used by CMs and MHPs. Respondents believe AI will offer future advantages for mental health care in terms of accessibility, cost reduction, personalization, and work efficiency. However, they were equally concerned about reducing human connection, ethics, privacy and regulation, medical errors, potential for misuse, and data security. Despite the immense potential, integration into mental health systems must be approached with caution, addressing legal and ethical concerns while developing safeguards to mitigate potential harms. Future surveys are planned to track use and acceptability of AI and associated issues over time.</p></sec></abstract><kwd-group><kwd>mental health</kwd><kwd>health care</kwd><kwd>AI</kwd><kwd>community members</kwd><kwd>mental health professional</kwd><kwd>web-based survey</kwd><kwd>Australia</kwd><kwd>descriptive statistic</kwd><kwd>thematic analysis</kwd><kwd>cost reduction</kwd><kwd>data security</kwd><kwd>digital health</kwd><kwd>digital intervention</kwd><kwd>artificial intelligence</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Mental ill health is the leading cause of disability worldwide [<xref ref-type="bibr" rid="ref1">1</xref>], yet fewer than half of all people with a mental health condition seek or receive evidence-based treatment [<xref ref-type="bibr" rid="ref2">2</xref>-<xref ref-type="bibr" rid="ref4">4</xref>]. Among the key structural barriers to effective care is that the demand outstrips the supply of qualified mental health professionals (MHPs), resulting in severely limited access and excessive wait times [<xref ref-type="bibr" rid="ref5">5</xref>]. 
Moreover, MHPs are frequently burdened by substantial time-intensive administrative responsibilities and tasks, such as note-taking, detailed report writing, and planning for therapeutic sessions, limiting their availability to provide clinical care [<xref ref-type="bibr" rid="ref6">6</xref>].</p><p>As digital technology becomes commonplace in society, tasks and services that were once performed manually, and often slowly, are now accomplished more efficiently via automated technology systems. However, despite many industries embracing new technologies for enhanced efficiency and responsiveness, the same progress has not been made in mental health care. Mental health care remains inaccessible, cumbersome to navigate, reactive, and slow to deliver, leaving mental health consumers frustrated and care providers burnt out [<xref ref-type="bibr" rid="ref7">7</xref>].</p><p>People now have much greater access to information, including medical information and their own health data, than ever before [<xref ref-type="bibr" rid="ref8">8</xref>]. In a contemporary landscape, where the prevalence of &#x201C;on-demand&#x201D; services is increasing, both mental health consumers and MHPs may expect comparable responsiveness. As a result, many are turning to digital products and services that aim to immediately address their needs. Young people, for example, are open and interested in using a range of digital technologies for mental health support, and many clinicians are already using these tools as part of routine care [<xref ref-type="bibr" rid="ref9">9</xref>]. Wide-scale adoption of telehealth through the COVID-19 pandemic demonstrated the capacity for services to shift in response to changing demands, resulting in MHPs strongly endorsing the ongoing provision of technology-enhanced services [<xref ref-type="bibr" rid="ref10">10</xref>]. 
This shift in attitude toward digital technology reflects an acknowledgment of the potential it holds for addressing barriers to providing effective and accessible care.</p><p>Recent advances in artificial intelligence (AI) have raised both excitement and debate concerning opportunities to harness this technology for mental health care [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref12">12</xref>]. AI encompasses a range of computer-based digital techniques and methodologies that perform cognitive processes characteristic of humans; such as learning, problem solving, pattern recognition, generalization, and predictive inference [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref14">14</xref>]. The recent advancement in natural language processing (NLP), a specialized branch of AI, has enabled chatbots and other language-driven systems to address requests, respond to queries, and provide advice autonomously, without human intervention [<xref ref-type="bibr" rid="ref15">15</xref>]. Commercial tools such as ChatGPT enable users to enter any kind of query and obtain real-time responses significantly faster than traditional methods, such as internet searches.</p><p>A wide range of AI enabled products and services have been trialled in mental health care and health care more broadly [<xref ref-type="bibr" rid="ref14">14</xref>]. AI has been used by health professionals to help solve complex problems such as identifying and diagnosing anomalies in medical images and genetic testing, predicting medical risk and disease prognosis, facilitating diagnostic and treatment decisions and recording and classifying clinical progress notes to name but a few [<xref ref-type="bibr" rid="ref14">14</xref>]. For people with mental health difficulties, platforms such as Woebot [<xref ref-type="bibr" rid="ref16">16</xref>] have been developed that use chatbots to deliver cognitive-behavioural therapy. 
More recently, tools such as ChatGPT have become freely available to the public, and with over 100 million users in its first few months, it was the fastest growing commercial application in history [<xref ref-type="bibr" rid="ref17">17</xref>]. According to one community survey of over 1000 people in Australia, just under a half (48%) of Australians had heard of ChatGPT and almost a quarter (23%) had used it, with millennials (born between 1981 and 1996) and those with bachelor degrees and higher making up the majority of users [<xref ref-type="bibr" rid="ref18">18</xref>]. Another youth survey found that 70% of young people 14&#x2010;17 have used ChatGPT, with 59% using it for study and 42% for completing school assignments [<xref ref-type="bibr" rid="ref19">19</xref>].</p><p>Large language model technologies like ChatGPT, are increasingly used by certain groups of consumers as an alternative to seeing a qualified MHP, and by some MHPs to assist with burdensome administrative tasks [<xref ref-type="bibr" rid="ref20">20</xref>]. A recent global survey of approximately 800 psychiatrists [<xref ref-type="bibr" rid="ref21">21</xref>] found that 75% thought it likely that AI would provide medical documentation, 54% to synthesize patient information to reach a diagnosis, 51% to analyze patient information to establish prognosis, and 47% to formulate personalized medication or therapy treatment plans for patients. In total, 36% felt that the benefits of AI would outweigh the risks, 25% felt that the risks would outweigh the benefits, and the rest were uncertain. These findings indicate that the segments of the mental health workforce anticipate that AI will be involved in care provision in some way, and that there are clearly risks and benefits which must be better understood.</p><p>The use of AI to support mental health care does come with potential harms [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref20">20</xref>]. 
For people using AI for mental health support there is the risk of misdiagnosis or misinformation, stemming from AI&#x2019;s potential for error. There are also questions about the role of empathy in AI systems, although a recent study highlighted that AI can outperform physicians in empathy measures [<xref ref-type="bibr" rid="ref22">22</xref>]. Data privacy emerges as another salient issue, given the sensitive nature of mental health information and the potential for data breaches or misuse [<xref ref-type="bibr" rid="ref23">23</xref>]. Biases inherent in nonrepresentative training data can lead to issues of inequity in diagnoses or treatments while the often-opaque decision-making processes of AI systems raise concerns about how complex decisions were made. These potential risks can lead to adverse consequences, and currently, there is limited information regarding the potentially harmful effects of these systems. Consequently, there is a dearth of legislation to safeguard users against such detrimental outcomes [<xref ref-type="bibr" rid="ref23">23</xref>].</p><p>With the widescale popularity of AI technologies such as ChatGPT, it is highly likely that many of these applications are already being used in various ways for mental health care. As society continues to debate the various benefits and risks that this brings, it is critical to understand how and why these technologies are currently being used in the context of mental health care. 
This study is the first in a series of planned surveys which aims to estimate the current rates of use of AI technology by CMs for mental health and well-being purposes, as well as MHPs for professional purposes, to better understand the scale of use as well as the experienced benefits, harms, and risks associated with its use.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Study Design and Setting</title><p>Community members (CMs) and MHPs were invited to complete one of two web-based surveys. The CM survey was advertised to the general population of people aged 16 years and older who reside in Australia. The MHP survey was advertised to MHPs who reside in Australia. The survey was advertised on social media platforms including LinkedIn, Instagram, and Facebook using a snowballing method for 8 weeks between mid-February and mid-April 2024.</p></sec><sec id="s2-2"><title>Procedure</title><p>The web-based survey was administered using Qualtrics XM (Qualtrics). After accessing the survey link, interested potential participants were screened for eligibility (aged older than 16 years and residing in Australia).</p></sec><sec id="s2-3"><title>Measures</title><p>The survey included questions regarding the following topics:</p><list list-type="bullet"><list-item><p>Participant characteristics: demographics for both surveys and clinical service use, and Kessler 10-Item Scale (K10) [<xref ref-type="bibr" rid="ref24">24</xref>] mental health measure for the CMs survey only. The K10 is scored between 10 and 50. The score ranges are normal (10-19), mild distress (20-24), moderate distress (25-30), and severe distress (&#x003E;30).</p></list-item><list-item><p>Technology comfort, attitudes, and use: MHPs and CMs comfort with technology, their attitudes to AI using the AI attitudes scale [<xref ref-type="bibr" rid="ref25">25</xref>], their interest in AI and their intention for future AI use. 
The AI attitudes scale is a 4-item scale which asks about general attitudes to AI. Each item is scored between 1 and 10, and the total score is the average score of the 4 items. It has good internal consistency with a Cronbach &#x03B1; of 0.82.</p></list-item><list-item><p>AI use cases: a number of exemplar AI use cases (see <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>) to (1) support CMs mental health and well-being, and (2) to support their MHPs in performing their work duties. CMs and MHPs were presented with a series of potential use cases where AI could assist with specific tasks. Respondents were asked to rate on a scale of 0 to 10 how likely it was that they would use AI for the specific use case.</p></list-item><list-item><p>Use, experienced benefits and harms: questions for the subset of those who have used AI tools pertaining to their experiences of benefit, harm, and risk.</p></list-item><list-item><p>Free-text, open-ended responses to both groups about what excites or concerns them regarding the use of AI for mental health care.</p></list-item></list><p>The survey is available from the authors upon request.</p></sec><sec id="s2-4"><title>Statistical Analyses</title><p>Quantitative data were analyzed using descriptive statistics in SPSS (version 22.0; IBM Corp). The sentiment and thematic elements in free-text responses were manually sorted by classifying responses into positive, negative, or neutral categories based on specific keywords and context. Each response was read in its entirety, and sentiments were categorized as positive, negative, or neutral based on the presence of specific keywords and the overall context of the response. Positive sentiments were identified by words such as &#x201C;hope,&#x201D; &#x201C;benefit,&#x201D; and &#x201C;optimistic,&#x201D; among others. 
Negative sentiments were indicated by terms like &#x201C;concern,&#x201D; &#x201C;risk,&#x201D; and &#x201C;fear.&#x201D; Responses lacking clear sentiment indicators or expressing ambiguous feelings were classified as neutral. Concurrently, thematic analysis [<xref ref-type="bibr" rid="ref26">26</xref>] was performed to identify and interpret patterns within the data. This involved an iterative process of reading and coding the data, generating initial codes, and collating codes into potential themes. Themes were reviewed and refined through discussions among the research team to ensure they accurately represented the data set.</p></sec><sec id="s2-5"><title>Ethical Considerations</title><p>Ethical approval for the use of the data was obtained from the University of Melbourne Human Research Ethics Committee (reference 2024-27805-48669-4). This study complied with the Declaration of Helsinki.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Sample Characteristics</title><p>The final sample consisted of 107 CMs and 86 MHPs. Demographic characteristics of both samples are presented in <xref ref-type="table" rid="table1">Table 1</xref>. The mean age of both groups was equivalent. The majority of CMs were employed or studying. MHPs tended to have higher levels of formal education than CMs. For MHPs, the majority were either clinical or general psychologists (21/86, 40%). For CMs, the majority had a previous mental health diagnosis or significant difficulties with their mental health or emotional well-being (76/108, 70%), and the majority had also seen a professional for these difficulties (74/108, 69%). 
The mean K10 score was 22.8 (SD 8.9), indicating mild levels of psychological distress.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Characteristics of CMs<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup> and MHPs<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup>.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="top" colspan="3">Demographic characteristics</td><td align="left" valign="bottom">CMs (n=108)</td><td align="left" valign="bottom">MHPs (n=86)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="3">Age (years), mean (SD)</td><td align="left" valign="top">36.9 (16.2)</td><td align="left" valign="top">41.7 (10.9)</td></tr><tr><td align="left" valign="top" colspan="5"><bold>Gender, n (%)</bold></td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Male</td><td align="left" valign="top">29 (26.9)</td><td align="left" valign="top">27 (31.4)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Female</td><td align="left" valign="top">71 (65.7)</td><td align="left" valign="top">59 (67)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Nonbinary, gender diverse, or nonconforming</td><td align="left" valign="top">5 (4.6)</td><td align="left" valign="top">0 (0)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Prefer not to say</td><td align="left" valign="top">3 (2.8)</td><td align="left" valign="top">0 (0)</td></tr><tr><td align="left" valign="top" colspan="5"><bold>Aboriginal or Torres Strait Islander, n (%)</bold></td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Yes</td><td align="left" valign="top">4 (3.7)</td><td align="left" valign="top">0 (0)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">No</td><td align="left" valign="top">103 
(95.4)</td><td align="left" valign="top">84 (98)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Prefer not to say</td><td align="left" valign="top">1 (0.9)</td><td align="left" valign="top">2 (2)</td></tr><tr><td align="left" valign="top" colspan="3"><bold>Employment, n (%)</bold></td><td align="left" valign="top"/><td align="left" valign="top"/></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Employed</td><td align="left" valign="top">69 (63.9)</td><td align="left" valign="top">&#x2014;<sup><xref ref-type="table-fn" rid="table1fn3">c</xref></sup></td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Student</td><td align="left" valign="top">20 (18.5)</td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Not in the labor force (looking for work, volunteer work, pensioner, or home duties)</td><td align="left" valign="top">19 (17.6)</td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top" colspan="5"><bold>Income after tax (Aus $; conversion rate of Aus $1=US $0.68 is applicable), n (%)</bold></td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">&#x003C;$45,000</td><td align="left" valign="top">41 (38.3)</td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">$45,001&#x2013;$120,000</td><td align="left" valign="top">48 (44.9)</td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">&#x003E;$120,001</td><td align="left" valign="top">19 (16.8)</td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top" colspan="5"><bold>Highest level of education, n (%)</bold></td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">High school or 
equivalent</td><td align="left" valign="top">21 (19.6)</td><td align="left" valign="top">0 (0)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Technical and further education or associate degree</td><td align="left" valign="top">18 (16.8)</td><td align="left" valign="top">3 (3.5)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Bachelor degree</td><td align="left" valign="top">29 (27.1)</td><td align="left" valign="top">18 (21)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Postgraduate diploma or graduate certificate</td><td align="left" valign="top">15 (13.1)</td><td align="left" valign="top">8 (9)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Masters degree</td><td align="left" valign="top">17 (15.9)</td><td align="left" valign="top">44 (51)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Doctoral degree or Doctor of Philosophy</td><td align="left" valign="top">8 (7.5)</td><td align="left" valign="top">13 (15)</td></tr><tr><td align="left" valign="top" colspan="5"><bold>Profession, n (%)</bold></td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Clinical psychologist</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">21 (24)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">General practitioner</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">1 (1)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Generalist psychologist</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">13 (15)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Mental health management</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">2 (2)</td></tr><tr><td 
align="left" valign="top"/><td align="left" valign="top" colspan="2">Mental health nurse</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">15 (17)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Occupational therapist</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">4 (5)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Peer or lived experience worker</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">3 (3.5)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Psychiatrist or psychiatry registrar</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">4 (5)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Social worker</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">19 (22)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="2">Therapist or counselor</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">4 (5)</td></tr><tr><td align="left" valign="top" colspan="5"><bold>Clinical and service use characteristics</bold></td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="4"><bold>Ever had a previous diagnosis or had significant difficulties with your mental health or emotional well-being? 
n (%)</bold></td></tr><tr><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top">Yes</td><td align="left" valign="top">76 (70.4)</td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top">No</td><td align="left" valign="top">32 (29.6)</td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top" colspan="4"><bold>Have you ever seen a health professional for mental health concerns? n (%)</bold></td></tr><tr><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top">Yes</td><td align="left" valign="top">74 (68.5)</td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top">No</td><td align="left" valign="top">29 (26.9)</td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top"/><td align="left" valign="top">Missing</td><td align="left" valign="top">5 (4.6)</td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top" colspan="3">K10, mean (SD)</td><td align="left" valign="top">22.8 (8.9)</td><td align="left" valign="top">&#x2014;</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>CM: community member.</p></fn><fn id="table1fn2"><p><sup>b</sup>MHP: mental health professional.</p></fn><fn id="table1fn3"><p><sup>c</sup>Not applicable.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-2"><title>Technology Comfort, AI Attitudes, and AI Use Intention</title><p>In terms of comfort with using digital technology, 79% (68/86) of MHPs and 82% (89/108) of CMs rated themselves as being very comfortable, somewhat comfortable, or comfortable, whereas 22% (19/86) of MHPs and 18% (19/108) of CMs described themselves as being somewhat or very uncomfortable. 
<xref ref-type="table" rid="table2">Table 2</xref> shows responses to the AI attitudes scale. CMs had neutral attitudes and MHPs tended to have more positive attitudes toward AI across all measured dimensions. <xref ref-type="table" rid="table3">Tables 3</xref> and <xref ref-type="table" rid="table4">4</xref> show that MHPs also tend to be more interested in using AI and are more likely to use it in the future for work purposes than CMs are to use AI to manage emotional and mental well-being.</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>AI<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup> attitudes scale for CMs<sup><xref ref-type="table-fn" rid="table2fn2">b</xref></sup> and MHPs<sup><xref ref-type="table-fn" rid="table2fn3">c</xref></sup>.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">AI attitudes scale (1=not at all; 10=completely agree)</td><td align="left" valign="bottom">CMs (n=95), mean (SD)</td><td align="left" valign="bottom">MHPs (n=82), mean (SD)</td></tr></thead><tbody><tr><td align="left" valign="top">I believe that AI will improve my life</td><td align="left" valign="top">5.15 (2.7)</td><td align="left" valign="top">6.62 (2.5)</td></tr><tr><td align="left" valign="top">I believe that AI will improve my work</td><td align="left" valign="top">5.52 (3.0)</td><td align="left" valign="top">6.70 (2.8)</td></tr><tr><td align="left" valign="top">I think I will use AI technology in the future</td><td align="left" valign="top">6.79 (3.0)</td><td align="left" valign="top">7.63 (2.4)</td></tr><tr><td align="left" valign="top">I think AI technology is positive for humanity</td><td align="left" valign="top">5.05 (2.7)</td><td align="left" valign="top">6.00 (2.4)</td></tr><tr><td align="left" valign="top">Average score</td><td align="left" valign="top">5.63 (2.5)</td><td align="left" valign="top">6.74 (2.3)</td></tr></tbody></table><table-wrap-foot><fn 
id="table2fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn><fn id="table2fn2"><p><sup>b</sup>CM: community member.</p></fn><fn id="table2fn3"><p><sup>c</sup>MHP: mental health professional.</p></fn></table-wrap-foot></table-wrap><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Community member interest in the use of artificial intelligence.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="top" colspan="2">Questions</td><td align="left" valign="top">Values</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="3"><bold>How interested are you in using AI to support your mental health and emotional well-being? (n=95), n (%)</bold></td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Not interested at all</td><td align="left" valign="top">26 (27)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Slightly interested</td><td align="left" valign="top">16 (17)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Somewhat interested</td><td align="left" valign="top">23 (24)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Moderately interested</td><td align="left" valign="top">13 (14)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Extremely interested</td><td align="left" valign="top">17 (18)</td></tr><tr><td align="left" valign="top" colspan="3"><bold>How likely are you to use AI tools in future to support your mental health and emotional well-being? 
(n=89), n (%)</bold></td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Very unlikely</td><td align="left" valign="top">15 (17)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Unlikely</td><td align="left" valign="top">9 (10)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Somewhat unlikely</td><td align="left" valign="top">9 (10)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Neither likely nor unlikely</td><td align="left" valign="top">13 (15)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Somewhat likely</td><td align="left" valign="top">21 (24)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Likely</td><td align="left" valign="top">15 (17)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Very likely</td><td align="left" valign="top">7 (8)</td></tr><tr><td align="left" valign="top" colspan="3"><bold>How likely are you to use AI for the following (0&#x2010;10)? 
mean (SD)</bold></td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Mood tracking</td><td align="left" valign="top">5.44 (3)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Therapeutic chatbots</td><td align="left" valign="top">4.48 (3.3)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Personalized recommendations</td><td align="left" valign="top">5.72 (3)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Early detection and monitoring</td><td align="left" valign="top">5.28 (3.2)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Crisis intervention support</td><td align="left" valign="top">4.47 (3.1)</td></tr></tbody></table></table-wrap><table-wrap id="t4" position="float"><label>Table 4.</label><caption><p>Mental health professional interest in the use of artificial intelligence.</p></caption><table id="table4" frame="hsides" rules="groups"><thead><tr><td align="left" valign="top" colspan="2">Questions</td><td align="left" valign="top">Values</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="3"><bold>How interested are you in using AI to assist with tasks in your role as a mental health professional? 
(n=82), n (%)</bold></td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Not interested at all</td><td align="left" valign="top">8 (10)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Slightly interested</td><td align="left" valign="top">9 (11)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Somewhat interested</td><td align="left" valign="top">13 (16)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Moderately interested</td><td align="left" valign="top">23 (28)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Extremely interested</td><td align="left" valign="top">29 (35)</td></tr><tr><td align="left" valign="top" colspan="3"><bold>How likely are you to use these and other AI tools in future to support your work? (n=74), n (%)</bold></td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Very unlikely</td><td align="left" valign="top">3 (4)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Unlikely</td><td align="left" valign="top">3 (4)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Somewhat unlikely</td><td align="left" valign="top">5 (7)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Neither likely nor unlikely</td><td align="left" valign="top">10 (14)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Somewhat likely</td><td align="left" valign="top">17 (23)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Likely</td><td align="left" valign="top">13 (18)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Very likely</td><td align="left" valign="top">23 (31)</td></tr><tr><td align="left" valign="top" colspan="3"><bold>How likely are you to use AI for the following (0&#x2010;10), mean (SD)</bold></td></tr><tr><td align="left" valign="top"/><td align="left" 
valign="top">Assessment and diagnosis</td><td align="left" valign="top">6.12 (3)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Provide personalized treatment recommendations to clients</td><td align="left" valign="top">6.14 (2.9)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Track and guide client progress</td><td align="left" valign="top">7.00 (2.6)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Enhancing client engagement</td><td align="left" valign="top">5.94 (3)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Administrative assistance</td><td align="left" valign="top">8.16 (2)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Literature and research analysis</td><td align="left" valign="top">8.07 (2.2)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Training and simulation</td><td align="left" valign="top">7.43 (2.5)</td></tr></tbody></table></table-wrap></sec><sec id="s3-3"><title>AI Use Cases</title><p><xref ref-type="table" rid="table3">Table 3</xref> shows that CMs tended to rate their likelihood of using AI for a range of tasks associated with managing their emotional and mental well-being midway between unlikely and likely. The use cases that were most to least popular were (1) providing personal recommendations, (2) mood tracking, (3) detecting early warning signs, (4) use of therapeutic chatbots, and (5) crisis or suicide prevention support. MHPs on the other hand rated themselves as more likely to use AI in use cases across the board. 
The use cases that were most to least popular were (1) administrative tasks support, (2) synthesizing the latest clinical evidence, (3) training and simulation, (4) tracking consumer progress, (5) personalized recommendations for clients, (6) assisting with assessment and diagnosis, and (7) enhancing consumer engagement with treatment.</p></sec><sec id="s3-4"><title>Use of AI</title><p>In total, 30 CMs (30/108, 28%) and 37 MHPs (37/86, 43%) reported use of AI in the previous 6 months. Of those, ChatGPT was the most common AI tool used by both CMs (16/30, 52%) and MHPs (20/37, 54%). Table S1 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> outlines the reasons respondents provided for using these tools, as well as their experienced benefits, harms, and concerns. Further, 77% (23/30) of CMs and 92% (34/37) of MHPs reported AI to be very beneficial, somewhat beneficial, or beneficial, whereas 10% (3/30) of CMs and 3% (1/37) of MHPs found AI to be very harmful, somewhat harmful, or harmful. CMs mainly used these tools to obtain quick advice when emotionally distressed (18/30, 60%) or as a personal therapist or coach they could converse with to help manage their emotional and mental health (14/30, 47%). The most reported benefits were their availability (20/30, 67%), their low cost compared to therapy (18/30, 60%), and their privacy (16/30, 53%). About half of CMs (16/30, 53%) reported that they did not experience harms or concerns. The rest reported a range of concerns, such as responses being too general or not personalized (11/30, 37%) and being unsure where their data was going (11/30, 37%). MHPs primarily used these tools to research mental health topics (24/37, 65%) and to assist with report and letter writing (20/37, 54%). Most reported it being helpful (25/37, 68%) and time saving (25/37, 68%). 
No harms or concerns were experienced by 49% (18/37); however, the rest reported concerns such as the outputs being too general (12/37, 32%), outputs being inaccurate (10/37, 27%), and being uncertain about the ethics of using these tools for these professional purposes (9/37, 24%).</p></sec><sec id="s3-5"><title>Themes and Subthemes of Content Analysis in Free-Text Responses</title><p>Respondents were invited to share any concerns or interests they had regarding the use of AI for their specific purposes. A total of 66 responses were received from CMs, and 50 responses were received from MHPs. Among CMs, sentiment was rated as positive in 13 (20%) comments, negative in 17 (26%) comments, and neutral in 38 (58%) comments. Of those with positive sentiment, most were excited about AI making mental health care more accessible and efficient, more personalized, and better integrated with other technologies. The content of their negative sentiment was the lack of human support, errors and misdiagnosis, and ethical or data privacy concerns.</p><p>For MHPs, of the 50 responses, sentiment was rated as positive in 12 (24%) comments, negative in 13 (26%) comments, and neutral in 25 (50%) comments. Positive sentiment themes involved increase in efficiency and therefore increased accessibility of mental health care, as well as advanced diagnostics and treatment outcomes. 
Negative sentiment comments involved concerns about data governance and security, misuse by clinicians, and regulatory challenges (<xref ref-type="table" rid="table5">Tables 5</xref> and <xref ref-type="table" rid="table6">6</xref>).</p><table-wrap id="t5" position="float"><label>Table 5.</label><caption><p>Analysis of community members&#x2019; positive and negative sentiment themes on the future of AI<sup><xref ref-type="table-fn" rid="table5fn1">a</xref></sup> use in mental health care.</p></caption><table id="table5" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom" colspan="2">Theme</td><td align="left" valign="bottom">Description</td><td align="left" valign="bottom">Quote</td><td align="left" valign="bottom">Community members, n (%)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="4"><bold>Positive sentiment</bold></td><td align="left" valign="top">13 (20)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Optimism about accessibility and efficiency</td><td align="left" valign="top">Many are excited about AI&#x2019;s potential to make mental health care more accessible and efficient, especially in underserved or remote areas.</td><td align="left" valign="top">&#x201C;AI can provide constant, instant, and affordable support for everyone who needs it.&#x201D;</td><td align="left" valign="top">12 (92)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Excitement about technological advancements</td><td align="left" valign="top">Some express a general excitement about the integration of cutting-edge technology in mental health and its potential to revolutionize care.</td><td align="left" valign="top">&#x201C;There are so many possibilities and it can be revolutionary for mental health diagnosis and treatment.&#x201D;</td><td align="left" valign="top">10 (77)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Potential for personalized care</td><td 
align="left" valign="top">There is enthusiasm for how AI can personalize treatment plans based on individual needs and historical data</td><td align="left" valign="top">&#x201C;Tailored support for young people to be supported in a way that suits them.&#x201D;</td><td align="left" valign="top">7 (54)</td></tr><tr><td align="left" valign="top" colspan="4"><bold>Negative sentiment</bold></td><td align="left" valign="top">17 (26)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Concerns about lack of human connection</td><td align="left" valign="top">Respondents express concern that AI might not provide the empathetic and nuanced interaction that a human therapist offers</td><td align="left" valign="top">&#x201C;Lack of human connection increasing the issues that harm mental health in the first place.&#x201D;</td><td align="left" valign="top">10 (59)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Ethical and privacy concerns</td><td align="left" valign="top">Concerns regarding the ethical use of AI and data privacy issues are significant, with worries about how sensitive data is handled.</td><td align="left" valign="top">&#x201C;Concerned about the privacy of therapy sessions when AI is involved.&#x201D;</td><td align="left" valign="top">9 (53)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Worries about misdiagnosis or lack of sensitivity</td><td align="left" valign="top">Some fear that AI may not correctly interpret complex human emotions and could lead to misdiagnosis or inappropriate treatment suggestions.</td><td align="left" valign="top">&#x201C;AI not being able to pick up on serious distress signals that a human would notice.&#x201D;</td><td align="left" valign="top">8 (47)</td></tr></tbody></table><table-wrap-foot><fn id="table5fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn></table-wrap-foot></table-wrap><table-wrap id="t6" position="float"><label>Table 
6.</label><caption><p>Analysis of mental health professionals&#x2019; positive and negative sentiment themes on the future of AI<sup><xref ref-type="table-fn" rid="table6fn1">a</xref></sup> use in mental health care for mental health professionals.</p></caption><table id="table6" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom" colspan="2">Theme</td><td align="left" valign="bottom">Description</td><td align="left" valign="bottom">Quote</td><td align="left" valign="bottom">Mental health professionals, n (%)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="4"><bold>Positive sentiment</bold></td><td align="left" valign="top">12 (24)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Technological potential and benefits</td><td align="left" valign="top">Positive views on how AI can enhance the efficiency, accessibility, and quality of mental health care.</td><td align="left" valign="top">&#x201C;The potential to deliver quality, timely, relevant health care information that allows patient to make more informed choices for their treatment.&#x201D;</td><td align="left" valign="top">12 (100)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Technological advancements</td><td align="left" valign="top">Excitement about specific AI technologies that may improve mental health diagnostics and treatment.</td><td align="left" valign="top">&#x201C;Big data simulations of neural processing; simulations of neurolinguistic indicators of treatment engagement and response, LLM<sup><xref ref-type="table-fn" rid="table6fn2">b</xref></sup>-based mental health co-pilots for both clinicians and patients, no more bloody referral letters!&#x201D;</td><td align="left" valign="top">5 (42)</td></tr><tr><td align="left" valign="top" colspan="4"><bold>Negative sentiment</bold></td><td align="left" valign="top">13 (26)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Risks and 
misuse</td><td align="left" valign="top">Concerns over potential negative impacts of AI, including risks of misuse by clinicians.</td><td align="left" valign="top">&#x201C;I have some concern that clinicians may overly rely on AI decisions or outputs that they do not critically analyse the outputs when they make clinical decisions.&#x201D;</td><td align="left" valign="top">13 (100)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Ethical and regulatory challenges</td><td align="left" valign="top">Concerns about the lack of adequate ethical guidelines and regulations for AI in health care and the interaction with registered professionals</td><td align="left" valign="top">&#x201C; &#x2026;there need to be enough guardrails to make it safe.&#x201D; &#x201C;there is no clinical judgement in AI and the use of this to replace things only clinicians should be practicing.&#x201D;</td><td align="left" valign="top">5 (38.5)</td></tr><tr><td align="left" valign="top"/><td align="left" valign="top">Data governance and security</td><td align="left" valign="top">Concerns about how data is managed and protected, focusing on issues like privacy, security, and confidentiality.</td><td align="left" valign="top">&#x201C;Data governance will be tricky.&#x201D; &#x201C;&#x2026;AI can be used and abused by companies&#x201D; &#x201C;data can be shared and sold and then used to manipulate people.&#x201D; &#x201C;Confidentiality and only as good as the data in the internet- reflects status quo not creative potential.&#x201D;</td><td align="left" valign="top">4 (31)</td></tr></tbody></table><table-wrap-foot><fn id="table6fn1"><p><sup>a</sup>AI: artificial intelligence.</p></fn><fn id="table6fn2"><p><sup>b</sup>LLM: large language model.</p></fn></table-wrap-foot></table-wrap></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>This study is to our knowledge the first to survey both CMs and MHPs on 
their patterns of use, experiences, and perceived benefits and harms associated with the application of AI technologies in mental health care. This analysis provides a critical insight into how AI is currently being used to support mental health care from the perspective of CMs and MHPs, which may inform technological development and guide ethical, professional, and policy initiatives.</p><p>Attitudes to AI between the groups varied. CMs scored similarly to published community norms on the AI Attitudes Scale [<xref ref-type="bibr" rid="ref25">25</xref>] (full scale average score 5.63, SD 2.5 vs 5.54, SD 1.78), while MHPs scored significantly higher (full scale average score 6.74, SD 2.3 vs 5.54, SD 1.78). AI use cases for CMs also had lower levels of endorsement than AI use cases for MHPs. Of note, all CM use cases involved scenarios where AI would be used directly to support personal mental health, whereas MHP use cases were split between indirect or administrative professional tasks and direct client mental health support tasks, the former being more likely to be endorsed. Potentially this is due to direct client use cases conceivably carrying more risk, making CMs and MHPs alike wary of using AI in this way. The intended purpose of commercial AI tools is also more aligned with professional support functionalities than direct mental health care applications. The difference highlights a significant area for future development and the necessity of balancing technological advancement with the training and education of both MHPs and CMs in safe use. The difference in use case endorsements also demonstrates that MHPs and CMs experience different pain points in their day-to-day lives. 
In the challenging context of embedding new technologies into mental health practice, equal consideration should be given to how AI technology can address these pain points for both groups.</p><p>Regarding actual use, we found that AI tools, most commonly ChatGPT, were used by around a third of CMs and 40% of MHPs. CMs tended to use these tools to obtain quick mental health advice or to receive emotional support, and nearly half used them as a personal coach or therapist, reporting the benefits being accessibility, privacy, and low-cost. These tools and associated AI techniques have been recognized for their potential to make mental health care more accessible, accurate, and efficient [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref27">27</xref>]. It is important to note, however, that these commercially available AI tools are not intended for such purposes, and as such, may present predictable and unpredictable risks [<xref ref-type="bibr" rid="ref27">27</xref>]. About half reported experiencing harms or concerns as a result of use, noting that responses were too general, nonpersonalized, inaccurate, or unhelpful. Further, a lack of clarity regarding data security and the ethics of using AI tools in this way was also reported. These kinds of issues can create harm in some cases. For example, Tessa, an integrated rule-based and large language model AI chatbot designed to support patients with eating disorders [<xref ref-type="bibr" rid="ref28">28</xref>] had to be withdrawn when it started to provide weight loss advice that ran counter to eating disorder clinical guidelines, sparking calls for greater regulatory measures for the safety of these tools in those and other contexts [<xref ref-type="bibr" rid="ref29">29</xref>]. 
Survey respondents also expressed concerns regarding the lack of human support and potential for misdiagnosis, which reflects a need for cautious and informed integration of AI into mental health practices, particularly where AI is used to predict illness or risk states [<xref ref-type="bibr" rid="ref14">14</xref>].</p><p>MHPs report using AI tools to support research, administrative, and training tasks, with a substantial number reporting time-saving benefits. Nonetheless, approximately a third of MHPs indicated concerns about the generality and potential inaccuracy of AI outputs, which emphasizes the need for ongoing scrutiny of the quality and application of AI in diverse settings for a wide range of purposes [<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref30">30</xref>]. These findings align with the broader discourse in general health care delivery on the integration of AI into care, where efficiency and productivity gains must be balanced with accuracy, reliability, and ethical considerations [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref31">31</xref>].</p><p>The expressed concerns by all respondents regarding data governance, security, and the ethical implications of using AI tools in mental health care were notable. As AI technologies continue to advance, it is paramount that data security and ethical use are prioritized to protect both consumers and professionals, and to maintain trust in these tools [<xref ref-type="bibr" rid="ref20">20</xref>]. As outlined by Luxton [<xref ref-type="bibr" rid="ref32">32</xref>] a decade ago, psychologists and other mental health care professionals have an essential part to play in the development, evaluation, and ethical use of AI technologies. 
In a field still grappling to retrofit regulation for non-AI digital health tools, the rapid development of AI health technology and the associated avalanche of personal health data, &#x201C;blackbox&#x201D; processing, and data sharing requires swift action to put the necessary safeguard structures in place.</p><p>This study has some limitations. First, the online recruitment strategy may have attracted more respondents familiar with technology, although approximately 1 in 5 reported some discomfort with technology use. Second, the relatively small sample size and recruitment method means that the results may not be fully representative of the broader population and therefore limit the generalizability of the findings. This limitation is expected to be addressed in subsequent similar surveys, which will track the acceptability and perceived concerns and issues of using AI in mental health care, over time. Third, the reporting of benefits and harms may be an underestimate as use may have hidden or time-delayed effects. Nevertheless, the findings provide a useful insight into how AI is both currently perceived and experienced by users. Future research could build on these preliminary findings with larger and more diverse samples, potentially through cross-jurisdictional studies that can provide a more comprehensive view of the impact of AI on mental health care.</p></sec><sec id="s4-2"><title>Conclusions</title><p>Our study underscores the promise and challenges of AI in mental health care. As AI tools evolve, it is essential that they are developed with ethics, inclusivity, accuracy, safety and the genuine needs of end users in mind. 
This will not only guide technological advancement but also ensure that AI serves as a valuable complement to overwhelmed traditional mental health services, ultimately improving outcomes and efficiencies for all stakeholders involved.</p></sec></sec></body><back><ack><p>The authors would like to thank Ali Morrow, BBusJourn, Fern Nicholls, BA, and Prof Blake Dear, PhD, for their assistance in disseminating the survey. We are also grateful to the people who gave their time to complete the survey.</p></ack><notes><sec><title>Data Availability</title><p>Deidentified data can be made available upon request.</p></sec></notes><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">CM</term><def><p>community member</p></def></def-item><def-item><term id="abb3">K10</term><def><p>Kessler 10-Item Scale</p></def></def-item><def-item><term id="abb4">MHP</term><def><p>mental health professional</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gore</surname><given-names>FM</given-names> </name><name name-style="western"><surname>Bloem</surname><given-names>PJ</given-names> </name><name name-style="western"><surname>Patton</surname><given-names>GC</given-names> </name><etal/></person-group><article-title>Global burden of disease in young people aged 10&#x2013;24 years: a systematic analysis</article-title><source>Lancet</source><year>2011</year><month>06</month><volume>377</volume><issue>9783</issue><fpage>2093</fpage><lpage>2102</lpage><pub-id pub-id-type="doi">10.1016/S0140-6736(11)60512-6</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Wang</surname><given-names>PS</given-names> </name><name name-style="western"><surname>Angermeyer</surname><given-names>M</given-names> </name><name name-style="western"><surname>Borges</surname><given-names>G</given-names> </name><etal/></person-group><article-title>Delay and failure in treatment seeking after first onset of mental disorders in the World Health Organization&#x2019;s World Mental Health Survey initiative</article-title><source>World Psychiatry</source><year>2007</year><month>10</month><volume>6</volume><issue>3</issue><fpage>177</fpage><lpage>185</lpage><pub-id pub-id-type="medline">18188443</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Whiteford</surname><given-names>HA</given-names> </name><name name-style="western"><surname>Buckingham</surname><given-names>WJ</given-names> </name><name name-style="western"><surname>Harris</surname><given-names>MG</given-names> </name><etal/></person-group><article-title>Estimating treatment rates for mental disorders in Australia</article-title><source>Aust Health Rev</source><year>2014</year><month>02</month><volume>38</volume><issue>1</issue><fpage>80</fpage><lpage>85</lpage><pub-id pub-id-type="doi">10.1071/AH13142</pub-id><pub-id pub-id-type="medline">24308925</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="web"><article-title>National study of mental health and wellbeing</article-title><source>Australian Bureau of Statistics</source><access-date>2023-05-22</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.abs.gov.au/statistics/health/mental-health/national-study-mental-health-and-wellbeing/latest-release">https://www.abs.gov.au/statistics/health/mental-health/national-study-mental-health-and-wellbeing/latest-release</ext-link></comment></nlm-citation></ref><ref 
id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kavanagh</surname><given-names>BE</given-names> </name><name name-style="western"><surname>Corney</surname><given-names>KB</given-names> </name><name name-style="western"><surname>Beks</surname><given-names>H</given-names> </name><name name-style="western"><surname>Williams</surname><given-names>LJ</given-names> </name><name name-style="western"><surname>Quirk</surname><given-names>SE</given-names> </name><name name-style="western"><surname>Versace</surname><given-names>VL</given-names> </name></person-group><article-title>A scoping review of the barriers and facilitators to accessing and utilising mental health services across regional, rural, and remote Australia</article-title><source>BMC Health Serv Res</source><year>2023</year><month>10</month><day>4</day><volume>23</volume><issue>1</issue><fpage>1060</fpage><pub-id pub-id-type="doi">10.1186/s12913-023-10034-4</pub-id><pub-id pub-id-type="medline">37794469</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Green</surname><given-names>AE</given-names> </name><name name-style="western"><surname>Albanese</surname><given-names>BJ</given-names> </name><name name-style="western"><surname>Shapiro</surname><given-names>NM</given-names> </name><name name-style="western"><surname>Aarons</surname><given-names>GA</given-names> </name></person-group><article-title>The roles of individual and organizational factors in burnout among community-based mental health service providers</article-title><source>Psychol Serv</source><year>2014</year><month>02</month><volume>11</volume><issue>1</issue><fpage>41</fpage><lpage>49</lpage><pub-id pub-id-type="doi">10.1037/a0035299</pub-id><pub-id pub-id-type="medline">24564442</pub-id></nlm-citation></ref><ref 
id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>O&#x2019;Connor</surname><given-names>K</given-names> </name><name name-style="western"><surname>Muller Neff</surname><given-names>D</given-names> </name><name name-style="western"><surname>Pitman</surname><given-names>S</given-names> </name></person-group><article-title>Burnout in mental health professionals: a systematic review and meta-analysis of prevalence and determinants</article-title><source>Eur Psychiatry</source><year>2018</year><month>09</month><volume>53</volume><fpage>74</fpage><lpage>99</lpage><pub-id pub-id-type="doi">10.1016/j.eurpsy.2018.06.003</pub-id><pub-id pub-id-type="medline">29957371</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lupton</surname><given-names>D</given-names> </name></person-group><article-title>The digitally engaged patient: self-monitoring and self-care in the digital health era</article-title><source>Soc Theory Health</source><year>2013</year><month>08</month><volume>11</volume><issue>3</issue><fpage>256</fpage><lpage>270</lpage><pub-id pub-id-type="doi">10.1057/sth.2013.10</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bell</surname><given-names>IH</given-names> </name><name name-style="western"><surname>Thompson</surname><given-names>A</given-names> </name><name name-style="western"><surname>Valentine</surname><given-names>L</given-names> </name><name name-style="western"><surname>Adams</surname><given-names>S</given-names> </name><name name-style="western"><surname>Alvarez-Jimenez</surname><given-names>M</given-names> </name><name name-style="western"><surname>Nicholas</surname><given-names>J</given-names> 
</name></person-group><article-title>Ownership, use of, and interest in digital mental health technologies among clinicians and young people across a spectrum of clinical care needs: cross-sectional survey</article-title><source>JMIR Ment Health</source><year>2022</year><month>05</month><day>11</day><volume>9</volume><issue>5</issue><fpage>e30716</fpage><pub-id pub-id-type="doi">10.2196/30716</pub-id><pub-id pub-id-type="medline">35544295</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nicholas</surname><given-names>J</given-names> </name><name name-style="western"><surname>Bell</surname><given-names>IH</given-names> </name><name name-style="western"><surname>Thompson</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Implementation lessons from the transition to telehealth during COVID-19: a survey of clinicians and young people from youth mental health services</article-title><source>Psychiatry Res</source><year>2021</year><month>05</month><volume>299</volume><fpage>113848</fpage><pub-id pub-id-type="doi">10.1016/j.psychres.2021.113848</pub-id><pub-id pub-id-type="medline">33725578</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Valentine</surname><given-names>L</given-names> </name><name name-style="western"><surname>D&#x2019;Alfonso</surname><given-names>S</given-names> </name><name name-style="western"><surname>Lederman</surname><given-names>R</given-names> </name></person-group><article-title>Recommender systems for mental health apps: advantages and ethical challenges</article-title><source>AI Soc</source><year>2023</year><month>08</month><volume>38</volume><issue>4</issue><fpage>1627</fpage><lpage>1638</lpage><pub-id 
pub-id-type="doi">10.1007/s00146-021-01322-w</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McCradden</surname><given-names>M</given-names> </name><name name-style="western"><surname>Hui</surname><given-names>K</given-names> </name><name name-style="western"><surname>Buchman</surname><given-names>DZ</given-names> </name></person-group><article-title>Evidence, ethics and the promise of artificial intelligence in psychiatry</article-title><source>J Med Ethics</source><year>2023</year><month>08</month><volume>49</volume><issue>8</issue><fpage>573</fpage><lpage>579</lpage><pub-id pub-id-type="doi">10.1136/jme-2022-108447</pub-id><pub-id pub-id-type="medline">36581457</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>D&#x2019;Alfonso</surname><given-names>S</given-names> </name></person-group><article-title>AI in mental health</article-title><source>Curr Opin Psychol</source><year>2020</year><month>12</month><volume>36</volume><fpage>112</fpage><lpage>117</lpage><pub-id pub-id-type="doi">10.1016/j.copsyc.2020.04.005</pub-id><pub-id pub-id-type="medline">32604065</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Graham</surname><given-names>S</given-names> </name><name name-style="western"><surname>Depp</surname><given-names>C</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>EE</given-names> </name><etal/></person-group><article-title>Artificial intelligence for mental health and mental illnesses: an overview</article-title><source>Curr Psychiatry 
Rep</source><year>2019</year><month>11</month><day>7</day><volume>21</volume><issue>11</issue><fpage>116</fpage><pub-id pub-id-type="doi">10.1007/s11920-019-1094-0</pub-id><pub-id pub-id-type="medline">31701320</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hirschberg</surname><given-names>J</given-names> </name><name name-style="western"><surname>Manning</surname><given-names>CD</given-names> </name></person-group><article-title>Advances in natural language processing</article-title><source>Science</source><year>2015</year><month>07</month><day>17</day><volume>349</volume><issue>6245</issue><fpage>261</fpage><lpage>266</lpage><pub-id pub-id-type="doi">10.1126/science.aaa8685</pub-id><pub-id pub-id-type="medline">26185244</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Durden</surname><given-names>E</given-names> </name><name name-style="western"><surname>Pirner</surname><given-names>MC</given-names> </name><name name-style="western"><surname>Rapoport</surname><given-names>SJ</given-names> </name><name name-style="western"><surname>Williams</surname><given-names>A</given-names> </name><name name-style="western"><surname>Robinson</surname><given-names>A</given-names> </name><name name-style="western"><surname>Forman-Hoffman</surname><given-names>VL</given-names> </name></person-group><article-title>Changes in stress, burnout, and resilience associated with an 8-week intervention with relational agent &#x201C;Woebot.&#x201D;</article-title><source>Internet Interv</source><year>2023</year><month>09</month><volume>33</volume><fpage>100637</fpage><pub-id pub-id-type="doi">10.1016/j.invent.2023.100637</pub-id><pub-id pub-id-type="medline">37635948</pub-id></nlm-citation></ref><ref 
id="ref17"><label>17</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Malik</surname><given-names>A</given-names> </name></person-group><article-title>OpenAI&#x2019;s ChatGPT now has 100 million weekly active users</article-title><source>TechCrunch</source><year>2023</year><month>11</month><day>6</day><access-date>2024-08-30</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://techcrunch.com/2023/11/06/openais-chatgpt-now-has-100-million-weekly-active-users/">https://techcrunch.com/2023/11/06/openais-chatgpt-now-has-100-million-weekly-active-users/</ext-link></comment></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Tan</surname><given-names>S</given-names> </name></person-group><article-title>Awareness versus usage of ChatGPT in Australia: how do they vary demographically?</article-title><source>YouGov</source><year>2023</year><month>06</month><day>8</day><access-date>2023-08-12</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://business.yougov.com/content/8400-awareness-versus-usage-of-chatgpt-in-australia-how-do-they-vary-demographically">https://business.yougov.com/content/8400-awareness-versus-usage-of-chatgpt-in-australia-how-do-they-vary-demographically</ext-link></comment></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Denejkina</surname><given-names>A</given-names> </name></person-group><article-title>Young people&#x2019;s perception and use of generative AI</article-title><source>Youth Insight</source><year>2023</year><month>06</month><day>27</day><access-date>2023-08-12</access-date><comment><ext-link ext-link-type="uri" 
xlink:href="https://youthinsight.com.au/education/young-peoples-perception-and-use-of-generative-ai/">https://youthinsight.com.au/education/young-peoples-perception-and-use-of-generative-ai/</ext-link></comment></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Blease</surname><given-names>C</given-names> </name><name name-style="western"><surname>Torous</surname><given-names>J</given-names> </name></person-group><article-title>ChatGPT and mental healthcare: balancing benefits with risks of harms</article-title><source>BMJ Ment Health</source><year>2023</year><month>11</month><volume>26</volume><issue>1</issue><fpage>e300884</fpage><pub-id pub-id-type="doi">10.1136/bmjment-2023-300884</pub-id><pub-id pub-id-type="medline">37949485</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Doraiswamy</surname><given-names>PM</given-names> </name><name name-style="western"><surname>Blease</surname><given-names>C</given-names> </name><name name-style="western"><surname>Bodner</surname><given-names>K</given-names> </name></person-group><article-title>Artificial intelligence and the future of psychiatry: insights from a global physician survey</article-title><source>Artif Intell Med</source><year>2020</year><month>01</month><volume>102</volume><fpage>101753</fpage><pub-id pub-id-type="doi">10.1016/j.artmed.2019.101753</pub-id><pub-id pub-id-type="medline">31980092</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ayers</surname><given-names>JW</given-names> </name><name name-style="western"><surname>Poliak</surname><given-names>A</given-names> </name><name 
name-style="western"><surname>Dredze</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Comparing physician and artificial intelligence chatbot responses to patient questions posted to a public social media forum</article-title><source>JAMA Intern Med</source><year>2023</year><month>06</month><day>1</day><volume>183</volume><issue>6</issue><fpage>589</fpage><lpage>596</lpage><pub-id pub-id-type="doi">10.1001/jamainternmed.2023.1838</pub-id><pub-id pub-id-type="medline">37115527</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gooding</surname><given-names>P</given-names> </name></person-group><article-title>Mapping the rise of digital mental health technologies: emerging issues for law and society</article-title><source>Int J Law Psychiatry</source><year>2019</year><volume>67</volume><fpage>101498</fpage><pub-id pub-id-type="doi">10.1016/j.ijlp.2019.101498</pub-id><pub-id pub-id-type="medline">31785726</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kessler</surname><given-names>RC</given-names> </name><name name-style="western"><surname>Andrews</surname><given-names>G</given-names> </name><name name-style="western"><surname>Colpe</surname><given-names>LJ</given-names> </name><etal/></person-group><article-title>Short screening scales to monitor population prevalences and trends in non-specific psychological distress</article-title><source>Psychol Med</source><year>2002</year><month>08</month><volume>32</volume><issue>6</issue><fpage>959</fpage><lpage>976</lpage><pub-id pub-id-type="doi">10.1017/s0033291702006074</pub-id><pub-id pub-id-type="medline">12214795</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Grassini</surname><given-names>S</given-names> </name></person-group><article-title>Development and validation of the AI attitude scale (AIAS-4): a brief measure of general attitude toward artificial intelligence</article-title><source>Front Psychol</source><year>2023</year><month>07</month><day>24</day><volume>14</volume><fpage>1191628</fpage><pub-id pub-id-type="doi">10.3389/fpsyg.2023.1191628</pub-id><pub-id pub-id-type="medline">37554139</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Thomas</surname><given-names>J</given-names> </name><name name-style="western"><surname>Harden</surname><given-names>A</given-names> </name></person-group><article-title>Methods for the thematic synthesis of qualitative research in systematic reviews</article-title><source>BMC Med Res Methodol</source><year>2008</year><month>07</month><day>10</day><volume>8</volume><fpage>45</fpage><pub-id pub-id-type="doi">10.1186/1471-2288-8-45</pub-id><pub-id pub-id-type="medline">18616818</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lee</surname><given-names>EE</given-names> </name><name name-style="western"><surname>Torous</surname><given-names>J</given-names> </name><name name-style="western"><surname>De Choudhury</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Artificial intelligence for mental health care: clinical applications, barriers, facilitators, and artificial wisdom</article-title><source>Biol Psychiatry Cogn Neurosci Neuroimaging</source><year>2021</year><month>09</month><volume>6</volume><issue>9</issue><fpage>856</fpage><lpage>864</lpage><pub-id pub-id-type="doi">10.1016/j.bpsc.2021.02.001</pub-id><pub-id 
pub-id-type="medline">33571718</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chan</surname><given-names>WW</given-names> </name><name name-style="western"><surname>Fitzsimmons-Craft</surname><given-names>EE</given-names> </name><name name-style="western"><surname>Smith</surname><given-names>AC</given-names> </name><etal/></person-group><article-title>The challenges in designing a prevention chatbot for eating disorders: observational study</article-title><source>JMIR Form Res</source><year>2022</year><month>01</month><day>19</day><volume>6</volume><issue>1</issue><fpage>e28003</fpage><pub-id pub-id-type="doi">10.2196/28003</pub-id><pub-id pub-id-type="medline">35044314</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sharp</surname><given-names>G</given-names> </name><name name-style="western"><surname>Torous</surname><given-names>J</given-names> </name><name name-style="western"><surname>West</surname><given-names>ML</given-names> </name></person-group><article-title>Ethical challenges in AI approaches to eating disorders</article-title><source>J Med Internet Res</source><year>2023</year><month>08</month><day>14</day><volume>25</volume><fpage>e50696</fpage><pub-id pub-id-type="doi">10.2196/50696</pub-id><pub-id pub-id-type="medline">37578836</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Russell</surname><given-names>RG</given-names> </name><name name-style="western"><surname>Lovett Novak</surname><given-names>L</given-names> </name><name name-style="western"><surname>Patel</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Competencies for the use of 
artificial intelligence-based tools by health care professionals</article-title><source>Acad Med</source><year>2023</year><month>03</month><day>1</day><volume>98</volume><issue>3</issue><fpage>348</fpage><lpage>356</lpage><pub-id pub-id-type="doi">10.1097/ACM.0000000000004963</pub-id><pub-id pub-id-type="medline">36731054</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Esmaeilzadeh</surname><given-names>P</given-names> </name></person-group><article-title>Use of AI-based tools for healthcare purposes: a survey study from consumers&#x2019; perspectives</article-title><source>BMC Med Inform Decis Mak</source><year>2020</year><month>07</month><day>22</day><volume>20</volume><issue>1</issue><fpage>170</fpage><pub-id pub-id-type="doi">10.1186/s12911-020-01191-1</pub-id><pub-id pub-id-type="medline">32698869</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Luxton</surname><given-names>DD</given-names> </name></person-group><article-title>Artificial intelligence in psychological practice: current and future applications and implications</article-title><source>Prof Psychol Res Pr</source><year>2014</year><volume>45</volume><issue>5</issue><fpage>332</fpage><lpage>339</lpage><pub-id pub-id-type="doi">10.1037/a0034559</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Tables for artificial intelligence (AI) tool experience for the subset of the community members sample who used AI and AI tool experience for the subset of the mental health professionals sample who used AI tools.</p><media xlink:href="mental_v11i1e60589_app1.docx" xlink:title="DOCX File, 23 KB"/></supplementary-material></app-group></back></article>