<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="editorial"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Ment Health</journal-id><journal-id journal-id-type="publisher-id">mental</journal-id><journal-id journal-id-type="index">16</journal-id><journal-title>JMIR Mental Health</journal-title><abbrev-journal-title>JMIR Ment Health</abbrev-journal-title><issn pub-type="epub">2368-7959</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v12i1e70439</article-id><article-id pub-id-type="doi">10.2196/70439</article-id><article-categories><subj-group subj-group-type="heading"><subject>Editorial</subject></subj-group></article-categories><title-group><article-title>Responsible Design, Integration, and Use of Generative AI in Mental Health</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes" equal-contrib="yes"><name name-style="western"><surname>Asman</surname><given-names>Oren</given-names></name><degrees>LLM, LLD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Torous</surname><given-names>John</given-names></name><degrees>MBI, MD</degrees><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Tal</surname><given-names>Amir</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" 
rid="aff4">4</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Nursing, Faculty of Medical and Health Sciences, Tel Aviv University</institution><addr-line>P.O.B 39040, Ramat Aviv</addr-line><addr-line>Tel Aviv</addr-line><country>Israel</country></aff><aff id="aff2"><institution>The Samueli Initiative for Responsible AI in Medicine, Tel Aviv University</institution><addr-line>Tel Aviv</addr-line><country>Israel</country></aff><aff id="aff3"><institution>Department of Psychiatry, Beth Israel Deaconess Medical Center, Harvard Medical School</institution><addr-line>Cambridge</addr-line><country>United States</country></aff><aff id="aff4"><institution>Faculty of Medical and Health Sciences, Tel Aviv University</institution><addr-line>Tel Aviv</addr-line><country>Israel</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Leung</surname><given-names>Tiffany</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Oren Asman, LLM, LLD, Department of Nursing, Faculty of Medical and Health Sciences, Tel Aviv University, P.O.B 39040, Ramat Aviv, Tel Aviv, 6997801, Israel, 972 547608020; <email>asman@tauex.tau.ac.il</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>all authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>20</day><month>1</month><year>2025</year></pub-date><volume>12</volume><elocation-id>e70439</elocation-id><history><date date-type="received"><day>21</day><month>12</month><year>2024</year></date><date date-type="rev-recd"><day>04</day><month>01</month><year>2025</year></date><date date-type="accepted"><day>06</day><month>01</month><year>2025</year></date></history><copyright-statement>&#x00A9; Oren Asman, John Torous, Amir Tal. 
Originally published in JMIR Mental Health (<ext-link ext-link-type="uri" xlink:href="https://mental.jmir.org">https://mental.jmir.org</ext-link>), 20.1.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Mental Health, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://mental.jmir.org/">https://mental.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://mental.jmir.org/2025/1/e70439"/><abstract><p>Generative artificial intelligence (GenAI) shows potential for personalized care, psychoeducation, and even crisis prediction in mental health, yet responsible use requires ethical consideration and deliberation and perhaps even governance. This is the first published theme issue focused on responsible GenAI in mental health. It brings together evidence and insights on GenAI&#x2019;s capabilities, such as emotion recognition, therapy-session summarization, and risk assessment, while highlighting the sensitive nature of mental health data and the need for rigorous validation. Contributors discuss how bias, alignment with human values, transparency, and empathy must be carefully addressed to ensure ethically grounded, artificial intelligence&#x2013;assisted care. 
By proposing conceptual frameworks; best practices; and regulatory approaches, including ethics of care and the preservation of socially important humanistic elements, this theme issue underscores that GenAI can complement, rather than replace, the vital role of human empathy in clinical settings. To achieve this, an ongoing collaboration between researchers, clinicians, policy makers, and technologists is essential.</p></abstract><kwd-group><kwd>responsible AI in medicine</kwd><kwd>AI ethics</kwd><kwd>digital mental health ethics</kwd><kwd>artificial intelligence</kwd><kwd>large language model</kwd><kwd>model alignment</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>The continued development of generative artificial intelligence (GenAI) and large language models (LLMs) shows potential in many fields, including high-stakes areas such as education, judicial work, security, and health. Utilizing this potential responsibly requires thoughtful deliberation and consideration and the creation of guidelines and conceptual frameworks that encompass the complexities of some of these fields.</p><p>The name of this theme issue reflects its focus&#x2014;&#x201C;Responsible Design, Integration, and Use of Generative AI in Mental Health.&#x201D; The current abilities of GenAI models for language generation and image synthesis already demonstrate their ever-growing potential use in personalized mental health psychoeducation, diagnosis, treatment planning, and interventions. However, integrating any of these applications within the mental health care realm requires careful examination, given the sensitive nature of mental health data, research, and interventions and the various capacities that may be expected of these models in these realms, to be considered of acceptable professional standard. 
Recent studies highlight the significant ethical challenges posed by GenAI, emphasizing the need for robust governance frameworks to mitigate risks and enhance the trustworthiness of these technologies [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref3">3</xref>].</p><p>This theme issue unites diverse stakeholders in exploring and adding a critical building block for the global challenge of conceptualizing and operationalizing responsible GenAI in mental health. It includes a collection of articles that examine the advantages, challenges, and potential risks associated with deploying GenAI models in mental health care while also proposing guidelines and best practices for their ethical and responsible implementation. Several papers discuss the application of GenAI in clinical settings; the ethical implications of artificial intelligence (AI)&#x2013;driven mental health interventions; and the development of new frameworks to ensure the alignment of GenAI systems with human values, virtues, and ethical standards. These include transparency, accountability, and fairness in AI applications; privacy and data security [<xref ref-type="bibr" rid="ref4">4</xref>]; and authenticity and congruence [<xref ref-type="bibr" rid="ref5">5</xref>].</p><p>The exploration of GenAI&#x2019;s role in mental health is particularly timely, given its rapid adoption and the evolving landscape of digital health technologies. Recent research has highlighted the transformative potential of GenAI in creating personalized mental health interventions that can enhance care delivery and patient outcomes. For instance, GenAI models are already used to generate therapeutic content, simulate dialogues for therapy, and even predict mental health conditions based on language patterns and sentiment analysis [<xref ref-type="bibr" rid="ref6">6</xref>]. 
However, this potential is accompanied by significant ethical and practical challenges, such as ensuring the accuracy and reliability of AI-generated content and preventing the misuse of these technologies [<xref ref-type="bibr" rid="ref7">7</xref>]. This theme issue provides a platform for in-depth discussions on these topics and proposes actionable insights for the responsible integration of GenAI in mental health care.</p></sec><sec id="s2"><title>Current Capabilities and Limitations</title><p>We begin by exploring GenAI&#x2019;s capabilities and limitations in mental health applications. Although the ever-evolving capacities explored in any research are, by definition, representatives of the time and models examined, the conceptual and normative-related discussions could have longer-term implications and relevance. The first paper, &#x201C;Capacity of Generative AI to Interpret Human Emotions From Visual and Textual Data: Pilot Evaluation Study&#x201D; [<xref ref-type="bibr" rid="ref8">8</xref>], evaluates the ability of ChatGPT-4 and Google Bard to interpret human emotions from both visual and textual data. Using the Reading the Mind in the Eyes Test and the Levels of Emotional Awareness Scale, the study found that ChatGPT-4 performed well in both visual and textual emotion recognition, aligning closely with human standards. Google&#x2019;s GenAI Bard, however, showed limitations in visual emotion interpretation. This paper emphasizes the need for inclusive data and stringent oversight to ensure accurate and reliable emotional recognition by AI systems.</p><p>The second paper, &#x201C;Comparing the Perspectives of Generative AI, Mental Health Experts, and the General Public on Schizophrenia Recovery: Case Vignette Study&#x201D; [<xref ref-type="bibr" rid="ref9">9</xref>], compares the perspectives of GenAI models, mental health professionals, and the public on schizophrenia recovery. 
The findings show that some AI models align closely with professional views, while others, like ChatGPT-3.5, demonstrate pessimism that could negatively impact patient motivation. The study highlights the potential and limitations of AI in providing clinical prognoses and underscores the need for rigorous validation of AI applications in mental health.</p><p>The third paper, &#x201C;Suicide Risk Assessments Through the Eyes of ChatGPT-3.5 Versus ChatGPT-4: Vignette Study&#x201D; [<xref ref-type="bibr" rid="ref10">10</xref>], examines the capability of ChatGPT models to assess suicide risk based on vignettes. The findings indicate that ChatGPT-4&#x2019;s assessments align more closely with those of mental health professionals compared to ChatGPT-3.5, which often underestimates suicide risk. These findings highlight the potential of advanced AI models to support mental health professionals but also underscore the necessity for further research and careful implementation to ensure accurate and safe use in clinical settings.</p><p>The paper &#x201C;Exploring the Efficacy of Large Language Models in Summarizing Mental Health Counseling Sessions: Benchmark Study&#x201D; [<xref ref-type="bibr" rid="ref11">11</xref>] evaluates the performance of state-of-the-art LLMs in summarizing therapy sessions. By introducing the Mental Health Counseling-Component&#x2013;Guided Dialogue Summaries dataset and assessing task-specific LLMs, like MentalLlama, Mistral, and MentalBART, the study demonstrates their promise while emphasizing their current limitations in terms of clinical applicability. 
Expert assessments revealed the need for further refinement and validation before such tools can be integrated into practice.</p><p>Another key contribution, &#x201C;Large Language Models Versus Expert Clinicians in Crisis Prediction Among Telemental Health Patients: Comparative Study&#x201D; [<xref ref-type="bibr" rid="ref12">12</xref>], compares GPT-4&#x2019;s performance with that of senior clinicians in predicting suicide crises based on intake data. Although GPT-4 approached clinician-level performance in some metrics, its reliability was limited by sensitivity and bias issues. The study underscores the potential such tools have for augmenting crisis prediction but highlights the need for additional safety measures and validation.</p></sec><sec id="s3"><title>Ethical and Humanistic Considerations</title><p>Herein, we delve into the ethical and humanistic considerations of GenAI in mental health.</p><p>In &#x201C;Exploring Bias(es) of Large Language Models in the Field of Mental Health &#x2013; A Comparative Study Investigating the Effect of Gender and Sexual Orientation in Anorexia Nervosa and Bulimia Nervosa Case Vignettes&#x201D; [<xref ref-type="bibr" rid="ref13">13</xref>], the authors showed that LLMs assigned lower mental health&#x2013;related quality of life scores to men compared to women with a similar eating disorder severity, with no real-world epidemiological evidence for such a pattern. 
This may reflect historical underrepresentation and societal biases in the data used for training the model and raises questions about how such biases can be mitigated by users as well as developers.</p><p>Next, we address the ethical implications of humanizing AI and the importance of empathy in therapeutic contexts.</p><p>The paper &#x201C;The Role of Humanization and Robustness of Large Language Models in Conversational Artificial Intelligence for Individuals With Depression: A Critical Analysis&#x201D; [<xref ref-type="bibr" rid="ref14">14</xref>] explores the use of LLMs, such as OpenAI&#x2019;s ChatGPT-4, in mental health care. It highlights their potential to offer personalized therapeutic support for patients with depression through context-aware interactions. However, it also identifies significant ethical and technical challenges, including the risks of humanizing LLMs and their lack of contextualized robustness. Humanization can lead to unrealistic expectations and overtrust, while inadequate robustness may cause inconsistent and potentially harmful responses. The authors recommend clear communication of AI limitations, fine-tuning with high-quality data, and interdisciplinary research to responsibly integrate LLMs in mental health care, thereby enhancing patient support while minimizing risks.</p><p>The paper &#x201C;The Machine Speaks: Conversational AI and the Importance of Effort to Relationships of Meaning&#x201D; [<xref ref-type="bibr" rid="ref15">15</xref>] explores the implications of using conversational AI in place of human effort in interpersonal relationships. The authors emphasize that effort in relationships conveys intrinsic value and meaning, which can be lost when machines take over these interactions. They discuss the importance of maintaining human effort in therapeutic contexts to preserve the meaningful engagement and personal growth that come from human-to-human interactions. 
This paper encourages a critical examination of the potential losses in meaning and opportunities for self-understanding when relying on GenAI.</p><p>Following this, the paper &#x201C;Considering the Role of Human Empathy in AI-Driven Therapy&#x201D; [<xref ref-type="bibr" rid="ref16">16</xref>] addresses the critical role of empathy in therapy. It evaluates whether AI-driven therapy can replicate empathic interactions. The authors define different aspects of empathy, compare the empathic capabilities of humans and GenAI, and discuss when human empathy is most needed in therapeutic settings. They call for ongoing research and dialogue to ensure that AI-mediated therapy maintains the essential human element of empathy, which is crucial for effective therapeutic outcomes.</p><p>In &#x201C;The Artificial Third: A Broad View of the Effects of Introducing Generative Artificial Intelligence on Psychotherapy&#x201D; [<xref ref-type="bibr" rid="ref17">17</xref>], the authors introduce the concept of the &#x201C;artificial third&#x201D; in psychotherapy, following Freud&#x2019;s theory of narcissistic blows. They argue that GenAI represents a significant shift in how we perceive society, interrelationships, and self. They raise important questions about transparency, autonomy, and the irreplaceable human elements in therapy, suggesting that with ethical consideration, the artificial third can enhance but not replace the human touch in therapeutic relationships.</p></sec><sec id="s4"><title>GenAI Alignment With Values and Virtues</title><p>Finally, we consider the alignment of GenAI with human values and regulatory perspectives.</p><p>The study &#x201C;Assessing the Alignment of Large Language Models With Human Values for Mental Health Integration: Cross-Sectional Study Using Schwartz&#x2019;s Theory of Basic Values&#x201D; [<xref ref-type="bibr" rid="ref18">18</xref>] evaluates whether LLMs align with human values, using Schwartz&#x2019;s theory of basic values. 
The authors found that while this framework can characterize value-like constructs within LLMs, there are significant divergences from human values, raising ethical concerns. They call for standardized alignment processes to ensure that LLMs are integrated into mental health care in a way that respects and reflects diverse human values.</p><p>Another important contribution is the article &#x201C;Regulating AI in Mental Health: Ethics of Care Perspective&#x201D; [<xref ref-type="bibr" rid="ref19">19</xref>], which argues that the dominant responsible AI approach is insufficient because it overlooks the impact of AI on human relationships. The author proposes an ethics of care approach to AI regulation, which addresses AI&#x2019;s impact on human relationships and establishes clear responsibilities for developers. They highlight the potential for emotional manipulation and the risks involved, proposing a series of considerations grounded in the ethics of care for developing AI-powered therapeutic tools.</p><p>Finally, the article &#x201C;An Ethical Perspective on the Democratization of Mental Health With Generative AI&#x201D; [<xref ref-type="bibr" rid="ref20">20</xref>] explores the historical context of democratizing information and argues that GenAI technologies represent a new phase in this movement, offering improved accessibility to mental health knowledge and care. However, it also highlights the significant risks and challenges that need careful consideration. The paper proposes a strategic questionnaire for assessing AI-based mental health applications, advocating for an approach that is both ethically grounded and patient-centered.</p></sec><sec id="s5" sec-type="conclusions"><title>Conclusions</title><p>The papers comprising this special issue make essential and exciting contributions to the field of digital mental health, specifically focusing on the responsible integration and use of GenAI. 
These studies showcase the already remarkable abilities of LLMs and allude to the potential of integrating GenAI in mental health diagnosis, treatment, rehabilitation, and recovery while also raising awareness of technical, clinical, philosophical, and ethical challenges related to safety and efficacy.</p><p>This theme issue is merely one stepping stone that is part of an ongoing global effort. Responsible AI frameworks for mental health must be adapted and integrated into local and international governance frameworks, thereby acknowledging that the current extraordinary opportunity also presents a profound professional and societal challenge. By fostering ongoing dialogue and collaboration among researchers, clinicians, ethicists, policy makers, and technologists, we can harness the benefits of GenAI to enhance mental health care while upholding principles, values, and virtues fundamental to humanistic care.</p><p>Together, we can ensure that this technology serves as a tool for doing good, augmenting human capabilities while avoiding harm and respecting and retaining the socially important humanistic elements of empathy, authenticity, and connection.</p></sec></body><back><fn-group><fn fn-type="con"><p>Conceptualization: OA, JT, AT</p><p>Project administration: OA, JT, AT</p><p>Supervision: OA, JT</p><p>Validation: OA, AT</p><p>Writing &#x2013; original draft: OA, AT</p><p>Writing &#x2013; review &#x0026; editing: OA, JT, AT</p></fn><fn fn-type="conflict"><p>AT and OA are authors of the paper &#x201C;An Ethical Perspective on the Democratization of Mental Health With Generative AI&#x201D; [<xref ref-type="bibr" rid="ref20">20</xref>] in this theme issue of <italic>JMIR Mental Health</italic>. AT and OA are guest editors for this theme issue. 
JT is the editor-in-chief of <italic>JMIR Mental Health</italic>.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">GenAI</term><def><p>generative artificial intelligence</p></def></def-item><def-item><term id="abb3">LLM</term><def><p>large language model</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Omar</surname><given-names>M</given-names> </name><name name-style="western"><surname>Levkovich</surname><given-names>I</given-names> </name></person-group><article-title>Exploring the efficacy and potential of large language models for depression: a systematic review</article-title><source>J Affect Disord</source><year>2025</year><month>02</month><day>15</day><volume>371</volume><fpage>234</fpage><lpage>244</lpage><pub-id pub-id-type="doi">10.1016/j.jad.2024.11.052</pub-id><pub-id pub-id-type="medline">39581383</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="preprint"><person-group person-group-type="author"><name name-style="western"><surname>Oniani</surname><given-names>D</given-names> </name><name name-style="western"><surname>Hilsman</surname><given-names>J</given-names> </name><name name-style="western"><surname>Peng</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>From military to healthcare: adopting and expanding ethical principles for generative artificial intelligence</article-title><source>arXiv</source><comment>Preprint posted online on  Aug 4, 2023</comment><pub-id pub-id-type="doi">10.48550/arXiv.2308.02448</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Tal</surname><given-names>A</given-names> </name><name name-style="western"><surname>Elyoseph</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Haber</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>The artificial third: utilizing ChatGPT in mental health</article-title><source>Am J Bioeth</source><year>2023</year><month>10</month><volume>23</volume><issue>10</issue><fpage>74</fpage><lpage>77</lpage><pub-id pub-id-type="doi">10.1080/15265161.2023.2250297</pub-id><pub-id pub-id-type="medline">37812102</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="preprint"><person-group person-group-type="author"><name name-style="western"><surname>Roger</surname><given-names>A</given-names> </name><name name-style="western"><surname>A&#x00EF;meur</surname><given-names>E</given-names> </name><name name-style="western"><surname>Rish</surname><given-names>I</given-names> </name></person-group><article-title>Towards ethical multimodal systems</article-title><source>arXiv</source><comment>Preprint posted online on  Apr 26, 2023</comment><pub-id pub-id-type="doi">10.48550/arXiv.2304.13765</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Asman</surname><given-names>O</given-names> </name><name name-style="western"><surname>Tal</surname><given-names>A</given-names> </name><name name-style="western"><surname>Barilan</surname><given-names>YM</given-names> </name></person-group><article-title>Conversational artificial intelligence-patient alliance Turing test and the search for authenticity</article-title><source>Am J Bioeth</source><year>2023</year><month>05</month><volume>23</volume><issue>5</issue><fpage>62</fpage><lpage>64</lpage><pub-id pub-id-type="doi">10.1080/15265161.2023.2191046</pub-id><pub-id 
pub-id-type="medline">37130413</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bir&#x00F3;</surname><given-names>A</given-names> </name><name name-style="western"><surname>Cuesta-Vargas</surname><given-names>AI</given-names> </name><name name-style="western"><surname>Szil&#x00E1;gyi</surname><given-names>L</given-names> </name></person-group><article-title>Precognition of mental health and neurogenerative disorders using AI-parsed text and sentiment analysis</article-title><source>Acta Univ Sapient Inform</source><year>2023</year><month>12</month><day>11</day><volume>15</volume><issue>2</issue><fpage>359</fpage><lpage>403</lpage><pub-id pub-id-type="doi">10.2478/ausi-2023-0022</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Blease</surname><given-names>C</given-names> </name><name name-style="western"><surname>Torous</surname><given-names>J</given-names> </name></person-group><article-title>ChatGPT and mental healthcare: balancing benefits with risks of harms</article-title><source>BMJ Ment Health</source><year>2023</year><month>11</month><volume>26</volume><issue>1</issue><fpage>e300884</fpage><pub-id pub-id-type="doi">10.1136/bmjment-2023-300884</pub-id><pub-id pub-id-type="medline">37949485</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Elyoseph</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Refoua</surname><given-names>E</given-names> </name><name name-style="western"><surname>Asraf</surname><given-names>K</given-names> </name><name name-style="western"><surname>Lvovsky</surname><given-names>M</given-names> </name><name 
name-style="western"><surname>Shimoni</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Hadar-Shoval</surname><given-names>D</given-names> </name></person-group><article-title>Capacity of generative AI to interpret human emotions from visual and textual data: pilot evaluation study</article-title><source>JMIR Ment Health</source><year>2024</year><month>02</month><day>6</day><volume>11</volume><fpage>e54369</fpage><pub-id pub-id-type="doi">10.2196/54369</pub-id><pub-id pub-id-type="medline">38319707</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Elyoseph</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Levkovich</surname><given-names>I</given-names> </name></person-group><article-title>Comparing the perspectives of generative AI, mental health experts, and the general public on schizophrenia recovery: case vignette study</article-title><source>JMIR Ment Health</source><year>2024</year><month>03</month><day>18</day><volume>11</volume><fpage>e53043</fpage><pub-id pub-id-type="doi">10.2196/53043</pub-id><pub-id pub-id-type="medline">38533615</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Levkovich</surname><given-names>I</given-names> </name><name name-style="western"><surname>Elyoseph</surname><given-names>Z</given-names> </name></person-group><article-title>Suicide risk assessments through the eyes of ChatGPT-3.5 versus ChatGPT-4: vignette study</article-title><source>JMIR Ment Health</source><year>2023</year><month>09</month><day>20</day><volume>10</volume><fpage>e51232</fpage><pub-id pub-id-type="doi">10.2196/51232</pub-id><pub-id pub-id-type="medline">37728984</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Adhikary</surname><given-names>PK</given-names> </name><name name-style="western"><surname>Srivastava</surname><given-names>A</given-names> </name><name name-style="western"><surname>Kumar</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Exploring the efficacy of large language models in summarizing mental health counseling sessions: benchmark study</article-title><source>JMIR Ment Health</source><year>2024</year><month>07</month><day>23</day><volume>11</volume><fpage>e57306</fpage><pub-id pub-id-type="doi">10.2196/57306</pub-id><pub-id pub-id-type="medline">39042893</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lee</surname><given-names>C</given-names> </name><name name-style="western"><surname>Mohebbi</surname><given-names>M</given-names> </name><name name-style="western"><surname>O&#x2019;Callaghan</surname><given-names>E</given-names> </name><name name-style="western"><surname>Winsberg</surname><given-names>M</given-names> </name></person-group><article-title>Large language models versus expert clinicians in crisis prediction among telemental health patients: comparative study</article-title><source>JMIR Ment Health</source><year>2024</year><month>08</month><day>2</day><volume>11</volume><fpage>e58129</fpage><pub-id pub-id-type="doi">10.2196/58129</pub-id><pub-id pub-id-type="medline">38876484</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="preprint"><person-group person-group-type="author"><name name-style="western"><surname>Schnepper</surname><given-names>R</given-names> </name><name name-style="western"><surname>Roemmel</surname><given-names>N</given-names> </name><name name-style="western"><surname>Schaefert</surname><given-names>R</given-names> 
</name><name name-style="western"><surname>Lambrecht-Walzinger</surname><given-names>L</given-names> </name><name name-style="western"><surname>Meinlschmidt</surname><given-names>G</given-names> </name></person-group><article-title>Exploring bias(es) of large language models in the field of mental health &#x2013; a comparative study investigating the effect of gender and sexual orientation in anorexia nervosa and bulimia nervosa case vignettes</article-title><source>JMIR Preprints</source><comment>Preprint posted online on  Mar 1, 2024</comment><pub-id pub-id-type="doi">10.2196/preprints.57986</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ferrario</surname><given-names>A</given-names> </name><name name-style="western"><surname>Sedlakova</surname><given-names>J</given-names> </name><name name-style="western"><surname>Trachsel</surname><given-names>M</given-names> </name></person-group><article-title>The role of humanization and robustness of large language models in conversational artificial intelligence for individuals with depression: a critical analysis</article-title><source>JMIR Ment Health</source><year>2024</year><month>07</month><day>2</day><volume>11</volume><fpage>e56569</fpage><pub-id pub-id-type="doi">10.2196/56569</pub-id><pub-id pub-id-type="medline">38958218</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hartford</surname><given-names>A</given-names> </name><name name-style="western"><surname>Stein</surname><given-names>DJ</given-names> </name></person-group><article-title>The machine speaks: conversational AI and the importance of effort to relationships of meaning</article-title><source>JMIR Ment 
Health</source><year>2024</year><month>06</month><day>18</day><volume>11</volume><fpage>e53203</fpage><pub-id pub-id-type="doi">10.2196/53203</pub-id><pub-id pub-id-type="medline">38889401</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rubin</surname><given-names>M</given-names> </name><name name-style="western"><surname>Arnon</surname><given-names>H</given-names> </name><name name-style="western"><surname>Huppert</surname><given-names>JD</given-names> </name><name name-style="western"><surname>Perry</surname><given-names>A</given-names> </name></person-group><article-title>Considering the role of human empathy in AI-driven therapy</article-title><source>JMIR Ment Health</source><year>2024</year><month>06</month><day>11</day><volume>11</volume><fpage>e56529</fpage><pub-id pub-id-type="doi">10.2196/56529</pub-id><pub-id pub-id-type="medline">38861302</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Haber</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Levkovich</surname><given-names>I</given-names> </name><name name-style="western"><surname>Hadar-Shoval</surname><given-names>D</given-names> </name><name name-style="western"><surname>Elyoseph</surname><given-names>Z</given-names> </name></person-group><article-title>The artificial third: a broad view of the effects of introducing generative artificial intelligence on psychotherapy</article-title><source>JMIR Ment Health</source><year>2024</year><month>05</month><day>23</day><volume>11</volume><fpage>e54781</fpage><pub-id pub-id-type="doi">10.2196/54781</pub-id><pub-id pub-id-type="medline">38787297</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Hadar-Shoval</surname><given-names>D</given-names> </name><name name-style="western"><surname>Asraf</surname><given-names>K</given-names> </name><name name-style="western"><surname>Mizrachi</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Haber</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Elyoseph</surname><given-names>Z</given-names> </name></person-group><article-title>Assessing the alignment of large language models with human values for mental health integration: cross-sectional study using Schwartz&#x2019;s theory of basic values</article-title><source>JMIR Ment Health</source><year>2024</year><month>04</month><day>9</day><volume>11</volume><fpage>e55988</fpage><pub-id pub-id-type="doi">10.2196/55988</pub-id><pub-id pub-id-type="medline">38593424</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tavory</surname><given-names>T</given-names> </name></person-group><article-title>Regulating AI in mental health: ethics of care perspective</article-title><source>JMIR Ment Health</source><year>2024</year><month>09</month><day>19</day><volume>11</volume><fpage>e58493</fpage><pub-id pub-id-type="doi">10.2196/58493</pub-id><pub-id pub-id-type="medline">39298759</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Elyoseph</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Gur</surname><given-names>T</given-names> </name><name name-style="western"><surname>Haber</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>An ethical perspective on the democratization of mental health with generative AI</article-title><source>JMIR Ment 
Health</source><year>2024</year><month>10</month><day>17</day><volume>11</volume><fpage>e58011</fpage><pub-id pub-id-type="doi">10.2196/58011</pub-id><pub-id pub-id-type="medline">39417792</pub-id></nlm-citation></ref></ref-list></back></article>