<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Ment Health</journal-id><journal-id journal-id-type="publisher-id">mental</journal-id><journal-id journal-id-type="index">16</journal-id><journal-title>JMIR Mental Health</journal-title><abbrev-journal-title>JMIR Ment Health</abbrev-journal-title><issn pub-type="epub">2368-7959</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v12i1e67381</article-id><article-id pub-id-type="doi">10.2196/67381</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Impact of Conversational and Animation Features of a Mental Health App Virtual Agent on Depressive Symptoms and User Experience Among College Students: Randomized Controlled Trial</article-title></title-group><contrib-group><contrib contrib-type="author"><name name-style="western"><surname>Six</surname><given-names>Stephanie</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Schlesener</surname><given-names>Elizabeth</given-names></name><degrees>MFA</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Hill</surname><given-names>Victoria</given-names></name><degrees>BS</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Babu</surname><given-names>Sabarish 
V</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Byrne</surname><given-names>Kaileigh</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Psychology, Clemson University</institution><addr-line>418 Brackett Hall</addr-line><addr-line>Clemson</addr-line><addr-line>SC</addr-line><country>United States</country></aff><aff id="aff2"><institution>Department of Human-Centered Computing, Clemson University</institution><addr-line>Clemson</addr-line><addr-line>SC</addr-line><country>United States</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Torous</surname><given-names>John</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Aly</surname><given-names>Heba</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Feij&#x00F3;o-Garc&#x00ED;a</surname><given-names>Pedro Guillermo</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Kaileigh Byrne, PhD, Department of Psychology, Clemson University, 418 Brackett Hall, Clemson, SC, 29634, United States, 1 8646563935; <email>kaileib@clemson.edu</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>11</day><month>4</month><year>2025</year></pub-date><volume>12</volume><elocation-id>e67381</elocation-id><history><date date-type="received"><day>09</day><month>10</month><year>2024</year></date><date date-type="rev-recd"><day>20</day><month>01</month><year>2025</year></date><date date-type="accepted"><day>06</day><month>02</month><year>2025</year></date></history><copyright-statement>&#x00A9; Stephanie Six, Elizabeth Schlesener, Victoria Hill, 
Sabarish V Babu, Kaileigh Byrne. Originally published in JMIR Mental Health (<ext-link ext-link-type="uri" xlink:href="https://mental.jmir.org">https://mental.jmir.org</ext-link>), 11.4.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Mental Health, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://mental.jmir.org/">https://mental.jmir.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://mental.jmir.org/2025/1/e67381"/><abstract><sec><title>Background</title><p>Numerous mental health apps purport to alleviate depressive symptoms. Strong evidence suggests that brief cognitive behavioral therapy (bCBT)-based mental health apps can decrease depressive symptoms, yet there is limited research elucidating the specific features that may augment its therapeutic benefits. 
One potential design feature that may influence effectiveness and user experience is the inclusion of virtual agents that can mimic realistic, human face-to-face interactions.</p></sec><sec><title>Objective</title><p>The goal of the current experiment was to determine the effect of conversational and animation features of a virtual agent within a bCBT-based mental health app on depressive symptoms and user experience in college students with and without depressive symptoms.</p></sec><sec sec-type="methods"><title>Methods</title><p>College students (N=209) completed a 2-week intervention in which they engaged with a bCBT-based mental health app with a customizable therapeutic virtual agent that varied in conversational and animation features. A 2 (time: baseline vs 2-week follow-up) &#x00D7; 2 (conversational vs non-conversational agent) &#x00D7; 2 (animated vs non-animated agent) randomized controlled trial was used to assess mental health symptoms (Patient Health Questionnaire-8, Perceived Stress Scale-10, and Response Rumination Scale questionnaires) and user experience (mHealth App Usability Questionnaire, MAUQ) in college students with and without current depressive symptoms. The mental health app usability and qualitative questions regarding users&#x2019; perceptions of their therapeutic virtual agent interactions and customization process were assessed at follow-up.</p></sec><sec sec-type="results"><title>Results</title><p>Mixed ANOVA (analysis of variance) results demonstrated a significant decrease in symptoms of depression (<italic>P</italic>=.002; mean [SD]=5.5 [4.86] at follow-up vs mean [SD]=6.35 [4.71] at baseline)<italic>,</italic> stress (<italic>P</italic>=.005; mean [SD]=15.91 [7.67] at follow-up vs mean [SD]=17.02 [6.81] at baseline)<italic>,</italic> and rumination (<italic>P</italic>=.03; mean [SD]=40.42 [12.96] at follow-up vs mean [SD]=41.92 [13.61] at baseline); however, no significant effect of conversation or animation was observed. 
Findings also indicate a significant increase in user experience in animated conditions. This significant increase in animated conditions is also reflected in the user&#x2019;s ease of use and satisfaction (<italic>F</italic>(1, 201)=102.60, <italic>P</italic>&#x003C;.001)<italic>,</italic> system information arrangement (<italic>F</italic>(1, 201)=123.12, <italic>P</italic>&#x003C;.001)<italic>,</italic> and usefulness of the application (<italic>F</italic>(1, 201)=3667.62, <italic>P</italic>&#x003C;.001).</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>The current experiment provides support for bCBT-based mental health apps featuring customizable, humanlike therapeutic virtual agents and their ability to significantly reduce negative symptomology over a brief timeframe. The app intervention reduced mental health symptoms, regardless of whether the agent included conversational or animation features, but animation features enhanced the user experience. These effects were observed in both users with and without depressive symptoms.</p></sec><sec><title>Trial Registration</title><p>Open Science Framework B2HX5; <ext-link ext-link-type="uri" xlink:href="https://doi.org/10.17605/OSF.IO/B2HX5">https://doi.org/10.17605/OSF.IO/B2HX5</ext-link></p></sec></abstract><kwd-group><kwd>depression</kwd><kwd>mental health app</kwd><kwd>virtual agents</kwd><kwd>cognitive behavioral therapy</kwd><kwd>conversational agents</kwd><kwd>virtual agent</kwd><kwd>animations</kwd><kwd>college student</kwd><kwd>CBT</kwd><kwd>ANOVA</kwd><kwd>randomized controlled trial</kwd><kwd>depressive symptoms</kwd><kwd>mental disorder</kwd><kwd>mental illness</kwd><kwd>user experience</kwd><kwd>mHealth</kwd><kwd>digital health</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><sec id="s1-1"><title>Background</title><p>The prevalence of depressive symptoms within the United States drastically increased from 17 million to 21 million, 
a nearly 25% increase, from 2018 to 2020 during the COVID-19 pandemic [<xref ref-type="bibr" rid="ref1">1</xref>] with young adults and women disproportionately affected [<xref ref-type="bibr" rid="ref2">2</xref>]. To address depressive symptoms, mental health apps have emerged to offer assistance and therapeutic techniques to the public. Cognitive behavioral therapy (CBT)-based mental health apps represent a viable option to improve access to mental health resources [<xref ref-type="bibr" rid="ref3">3</xref>]. A form of CBT, brief cognitive behavioral therapy (bCBT) has been suggested for depressive individuals as a means of maintaining the user&#x2019;s attention while not requiring large amounts of the user&#x2019;s time or energy. This form of CBT has successfully delivered therapeutic interventions in a time-efficient manner, around 4&#x2010;16 brief sessions [<xref ref-type="bibr" rid="ref4">4</xref>,<xref ref-type="bibr" rid="ref5">5</xref>] in both subclinical [<xref ref-type="bibr" rid="ref6">6</xref>-<xref ref-type="bibr" rid="ref8">8</xref>] and clinical populations [<xref ref-type="bibr" rid="ref9">9</xref>]. Several bCBT-based apps, such as MoodMission [<xref ref-type="bibr" rid="ref10">10</xref>], Pacifica [<xref ref-type="bibr" rid="ref11">11</xref>], and SuperBetter [<xref ref-type="bibr" rid="ref12">12</xref>], have demonstrated effectiveness in reducing depressive symptoms. Despite their effectiveness, it is unclear how specific app features may enhance user experience to maximize therapeutic benefits.</p><p>The use of virtual agents represents one avenue that may enhance mental health user experience, as virtual agents can be leveraged to mimic realistic human interactions and model social connection [<xref ref-type="bibr" rid="ref13">13</xref>-<xref ref-type="bibr" rid="ref15">15</xref>]. 
The term &#x201C;virtual agent&#x201D; refers to a noncontrollable, artificial intelligence (AI)-driven virtual entity, such as chatbots and embodied conversational agents (ECAs) designed to interact with users [<xref ref-type="bibr" rid="ref16">16</xref>-<xref ref-type="bibr" rid="ref18">18</xref>]. Chatbots communicate with the user through a textual or voice-based interface design but typically lack a visual embodiment [<xref ref-type="bibr" rid="ref19">19</xref>]. ECAs are characterized by a human-like visual presence and have the capability to include both verbal or nonverbal communication behaviors [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref15">15</xref>]. While chatbots have demonstrated potential in numerous bCBT-based mental health apps, such as Woebot, Wysa, and Tess [<xref ref-type="bibr" rid="ref20">20</xref>-<xref ref-type="bibr" rid="ref22">22</xref>], ECAs offer a richer, more natural social presence [<xref ref-type="bibr" rid="ref15">15</xref>], making them particularly suited for mental health interventions. Surprisingly, exceptionally few studies have evaluated the effectiveness of bCBT-based mental health apps with ECAs [<xref ref-type="bibr" rid="ref6">6</xref>]. This study addresses this gap by incorporating an ECA-style virtual agent into the app design.</p><p>Given that a key component of ECA-style virtual agents is visual embodiment, the physical characteristics of a virtual agent may impact user experience. Research shows that similarity between a user&#x2019;s demographics and an agent&#x2019;s characteristics, such as gender and voice, fosters positive interactions by building trust and enhancing user motivation [<xref ref-type="bibr" rid="ref23">23</xref>-<xref ref-type="bibr" rid="ref26">26</xref>]. This aligns with the similarity-attraction effect, where users often prefer agents that mirror their own demographics, appearance, and voice [<xref ref-type="bibr" rid="ref24">24</xref>]. 
In mental health contexts, such similarity has been shown to significantly increase users&#x2019; willingness to engage in support activities [<xref ref-type="bibr" rid="ref25">25</xref>]. To leverage these benefits, the mental health app in this study includes customization options for the agent&#x2019;s physical characteristics, aiming to create a greater sense of connection and comfort during interactions.</p><p>Beyond visual embodiment, 2 key features can be embedded into ECA-style virtual agents to convey the realism of human face-to-face interactions: conversational and animation features. Conversational behaviors, including lip-sync and speech, are used to replicate natural, verbal communication actions [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>]. Virtual agent verbal cues that align with social norms, such as greetings, small talk, and thanking, foster trust and perceived knowledgeability [<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref30">30</xref>], particularly when the agent uses a formal, familiar voice quality and style [<xref ref-type="bibr" rid="ref31">31</xref>]. In addition, conversational agents can engage in turn-taking and provide feedback, mimicking the natural flow of human conversation [<xref ref-type="bibr" rid="ref15">15</xref>]. Turn-taking allows users to feel that they are actively participating in the interaction, while feedback conveys that the agent is attentive and responsive [<xref ref-type="bibr" rid="ref15">15</xref>]. A systematic review of mental health interventions leveraging conversational agents observed a significant reduction in psychological distress postintervention compared with baseline [<xref ref-type="bibr" rid="ref32">32</xref>]. 
These findings underscore the preliminary efficacy of virtual agents with conversational features on mental health symptoms and suggest that virtual agent conversational features may afford empathy and interactivity that mimic therapeutic dynamics [<xref ref-type="bibr" rid="ref32">32</xref>]. However, few studies reviewed were empirical randomized controlled trials [<xref ref-type="bibr" rid="ref32">32</xref>], and the variability in mental health symptoms limits understanding of how conversational features affect individuals with and without depressive symptoms.</p><p>On the other hand, animation supports natural communication by conveying nonverbal behaviors, such as facial expressions, co-speech gestures, body movements, and eye gaze [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref15">15</xref>]. Nonverbal cues, such as nodding and eye gaze indicate active listening and foster rapport [<xref ref-type="bibr" rid="ref33">33</xref>-<xref ref-type="bibr" rid="ref35">35</xref>], while facial expressions can convey emotional responsiveness [<xref ref-type="bibr" rid="ref36">36</xref>]. It is critical for animations to appear natural, as overly expressive facial animations can seem unrealistic [<xref ref-type="bibr" rid="ref37">37</xref>]. Natural animations encourage positive attributions toward virtual agents, such as greater acceptance, trust, credibility, and task appropriateness [<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref38">38</xref>]. Natural animations also elicit stronger emotional responses and a greater sense of social presence compared with static or partially animated agents [<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref40">40</xref>]. While natural animation cues, such as body movements and facial expressions, can enhance social presence, they are not always effective in conveying trustworthiness [<xref ref-type="bibr" rid="ref31">31</xref>]. 
Factors like the user&#x2019;s age, the relevance of the animation to the task, and the context (eg, interviews, learning, or commerce) influence how animation impacts perceived trust [<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref41">41</xref>], and the effectiveness of such animation features in mental health contexts remains underexplored.</p><p>In human-human therapeutic interactions, body language, tone, and other social cues are critical to conveying empathy and can influence therapeutic outcomes in individuals with depression [<xref ref-type="bibr" rid="ref42">42</xref>-<xref ref-type="bibr" rid="ref44">44</xref>]. Research with chatbots [<xref ref-type="bibr" rid="ref13">13</xref>] and ECAs [<xref ref-type="bibr" rid="ref45">45</xref>] has demonstrated that individuals experiencing depressive symptoms report high perceived virtual agent empathy and user-agent working alliance with levels mirroring that of CBT-based human interventions. These findings suggest that virtual agents may be able to mirror human-human therapeutic interactions by encouraging users to feel understood and supported. Such characteristics may be especially critical for individuals with depression, who often experience negative perceptions of themselves, others, and their environment [<xref ref-type="bibr" rid="ref46">46</xref>]. However, no studies have directly compared how these virtual agent features (eg, conversational vs animation) may influence mental health outcomes and user experience in users with and without depressive symptoms.</p></sec><sec id="s1-2"><title>Objectives</title><p>This study builds on previous work [<xref ref-type="bibr" rid="ref5">5</xref>-<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref20">20</xref>-<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref45">45</xref>] in several ways. 
First, this study directly compares how virtual agent conversational and animation features influence user experience in the context of mental health apps using a randomized controlled trial design. Second, this study assesses whether either of these features within a bCBT-based mental health app can reduce symptoms of depression, stress, and rumination over 2 weeks. Third, this study compares these features in a sample of users with and without depressive symptoms, addressing gaps in understanding how conversational versus animation features uniquely contribute to mental health outcomes in this population.</p><p>The study hypotheses for the quantitative analyses are outlined below:</p><p>H1: Individuals will exhibit significantly lower symptoms of depression, stress, and rumination after 2 weeks. This reduction will be more pronounced in the conversational and animated conditions.</p><p>H2: Individuals will have a more positive user experience with the agent in the conversational and animated conditions.</p><p>In addition to these quantitative analyses, we will query participants&#x2019; rationale in designing their virtual agents in terms of gender and similarity to people they know through qualitative methods.</p></sec></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Study Overview</title><p>The goal of this 4-arm randomized controlled trial was to determine the effect of virtual agent conversational and animation features within a bCBT-based mental health app on user experience and change in depressive symptoms over a 2-week intervention period. The virtual agent conversational feature reflects dialogue-based interaction between the user and agent, and the virtual agent animation embodies dynamic body movements and facial expressions. 
Participants completed a baseline training and setup session along with baseline questionnaires through a face-to-face assessment; thereafter, participants completed the intervention and 2-week postintervention questionnaires remotely. In this section, we describe the design of the overall app and virtual agent with a focus on the manipulation of the conversational and animation features. We also describe the methodology and analytic approach used to evaluate these features in a sample of college students with and without depressive symptoms. We note that this study is based partially on dissertation work by lead author SS.</p></sec><sec id="s2-2"><title>Participants</title><p>Following previous research evaluating AirHeart [<xref ref-type="bibr" rid="ref6">6</xref>], an a priori power analysis (<italic>F</italic> test, repeated measures ANOVA (analysis of variance), within-between interaction) was conducted for H1. The analysis aimed for 80% power to detect effects at <italic>P</italic>=.05 with 4 groups and 2 time points, based on previous effect sizes [<xref ref-type="bibr" rid="ref47">47</xref>]. Results indicated a required minimum sample of 136 participants. <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> provides detailed information. We sought to recruit &#x2265;25% (170 minimum) over the minimum to account for attrition and data exclusions.</p><p>A total of 209 college students completed the study and were randomized to one of the 4 experimental conditions (n= 209; mean age 19.97 years<italic>,</italic> SD 2.19; <xref ref-type="table" rid="table1">Table 1</xref>). Participants were incentivized to participate with compensation in the form of course credit, extra credit, or a $20 Amazon gift card, depending on their choice. Participants were excluded if they were outside the 18&#x2010;30 years age range or did not have daily access to a smartphone. 
Data were excluded for 3 reasons: (1) the participant completed less than 2 CBT-based modules, (2) failed more than 1 attention check, or (3) did not submit the postintervention survey.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Participant demographic Information by depressive group and condition (n=209).</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Demographic characteristics</td><td align="left" valign="bottom">Values</td></tr></thead><tbody><tr><td align="left" valign="top"><bold>Gender, n</bold></td><td align="left" valign="middle"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Male</td><td align="left" valign="middle">168</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Female</td><td align="left" valign="middle">39</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Nonbinary</td><td align="left" valign="middle">3</td></tr><tr><td align="left" valign="top"><bold>Race, n</bold></td><td align="left" valign="middle"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>White</td><td align="left" valign="middle">168</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Asian</td><td align="left" valign="middle">19</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Hispanic</td><td align="left" valign="middle">10</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Black</td><td align="left" valign="middle">8</td></tr><tr><td align="left" 
valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Bi-Racial</td><td align="left" valign="middle">3</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>American Indian or Native</td><td align="left" valign="middle">1</td></tr><tr><td align="left" valign="top"><bold>Mental health diagnosis, n</bold></td><td align="left" valign="middle"/></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Depression</td><td align="left" valign="middle">60</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Anxiety</td><td align="left" valign="middle">59</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>ADHD<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="middle">21</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>PTSD<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td><td align="left" valign="middle">7</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Bipolar II</td><td align="left" valign="middle">4</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Eating disorder</td><td align="left" valign="middle">2</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Adjustment disorder</td><td align="left" valign="middle">1</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Trichotillomania</td><td align="left" 
valign="middle">1</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Mood disorder</td><td align="left" valign="middle">1</td></tr><tr><td align="left" valign="top"><bold>Nondepressive group</bold></td><td align="left" valign="middle"/></tr><tr><td align="left" valign="top">Mean (SD) PHQ-8 score</td><td align="left" valign="middle">2.15 (1.34)</td></tr><tr><td align="left" valign="top">Mean (SD) age (years)</td><td align="left" valign="middle">20.24 (2.49)</td></tr><tr><td align="left" valign="top" colspan="2"><bold>Depressive group</bold></td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Mean (SD) PHQ-8 score</td><td align="left" valign="middle">9.29 (3.91)</td></tr><tr><td align="left" valign="top"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Mean (SD) age (years)</td><td align="left" valign="middle">19.84 (2.03)</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup> ADHD: attention deficit hyperactivity disorder</p></fn><fn id="table1fn2"><p><sup>b</sup> PTSD: posttraumatic stress disorder</p></fn></table-wrap-foot></table-wrap></sec><sec id="s2-3"><title>AirHeart Mental Health App</title><p>The AirHeart mental health app was designed using Unity 2021 and contains all themes and features of a version published in previous work [<xref ref-type="bibr" rid="ref6">6</xref>] but included new features, such as a help section, additional customization options for the virtual agent, and an additional resources section. The virtual agent was introduced to participants as their &#x201C;virtual coach&#x201D; who joined them on their journey and guided them through CBT topics. 
Given the importance of customization features to foster user-agent similarity [<xref ref-type="bibr" rid="ref24">24</xref>-<xref ref-type="bibr" rid="ref26">26</xref>], users were able to customize numerous features of their agent, including facial features, body shape, and clothing. The majority of the conversational and animation feature design choices were motivated by past research describing the importance of both verbal (ie, lip sync animation and co-speech gesturing) and nonverbal (ie, head nods and backchanneling) behaviors in conveying natural social communication information [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref15">15</xref>]. All user-agent communication was conducted through natural dyadic verbal exchanges. Users initiated verbal input using a speech-to-text engine, the speech recognition system plugin, while audio-based agent dialogue was created using the following text-to-speech (TTS) engines: RTVoice Native for Android + Amazon Web Services Polly Standard for iOS. Additional technical details regarding app development and virtual agent customization, conversational feature, and animation feature development can be found in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>.</p></sec><sec id="s2-4"><title>Experimental Conditions</title><p>The current experiment included 4 experimental conditions differing based on the presence or absence of conversational and animation features (conversational, animated; conversational, nonanimated; nonconversational, animated; nonconversational, nonanimated). All conditions had access to all app features (ie, CBT modules, journaling, mood tracker, agent customization, help section, and additional resources section).</p><p>The animation feature involved dynamic body movements and facial expressions exhibited by the virtual agent. 
The animated condition included human-like nonverbal body movements, mouth movements, and gestures in association with the information provided by the virtual agent. The nonanimated condition displayed a static, nonmoving virtual agent with a blank facial expression.</p><p>The conversational feature was characterized by user-agent interactivity in which question-and-response style dialogue was embedded within the CBT modules. The virtual agent asked questions or instructed the participant to complete activities aloud. A microphone icon provided a visual cue for users to engage in conversation with the agent: when red, the microphone was on, and when white, it was off. The nonconversational condition did not allow the user to add their input or respond to questions. <xref ref-type="fig" rid="figure1">Figure 1</xref> shows the visualization of the virtual agent in the 4 different conditions.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Example of a customized virtual agent in the (A) conversational, animated condition, (B) nonconversational, animated condition, (C) conversational, nonanimated condition, and (D) nonconversational, nonanimated condition. Users were able to customize their agents&#x2019; clothes, hairstyle, hair, skin, eye colors, body and face shape, facial cosmetics, and accessories. 
Within the conversational condition, the microphone icon provided a visual cue for users such that red indicated the microphone was on, and white indicated when it was off.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="mental_v12i1e67381_fig01.png"/></fig></sec><sec id="s2-5"><title>Measures</title><sec id="s2-5-1"><title>Depressive Symptoms Questionnaire</title><p>The Patient Health Questionnaire-8 (PHQ-8) was used to estimate depressive symptom severity over the past 2 weeks ranging from mild (0&#x2010;4) to severe (20+) [<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref49">49</xref>].</p></sec><sec id="s2-5-2"><title>Stress Symptoms Questionnaire</title><p>The Perceived Stress Scale-10 (PSS-10) is a subjective assessment of the user&#x2019;s stress symptoms during the past month [<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref51">51</xref>]. Participants&#x2019; scores ranged from 0&#x2010;40, with responses scoring &#x003C;14 suggesting low stress levels and those scoring &#x003E;26 suggesting high stress levels.</p></sec><sec id="s2-5-3"><title>Rumination Symptoms Questionnaire</title><p>The Response Rumination Scale (RRS) is a 22-item questionnaire that measures subjective levels of rumination tendencies [<xref ref-type="bibr" rid="ref52">52</xref>]. 
Responses are summed, ranging from 0&#x2010;88 with higher scores indicating more ruminative tendencies.</p></sec><sec id="s2-5-4"><title>The mHealth App Usability Questionnaire</title><p>The mHealth App Usability Questionnaire (MAUQ) is a 21-item questionnaire composed of 3 subscales: ease of use and satisfaction, system information arrangement, and usefulness [<xref ref-type="bibr" rid="ref53">53</xref>].</p></sec><sec id="s2-5-5"><title>Open-Ended Qualitative Questions</title><p>Participants were asked the following open-ended questions: (1) Did you make your virtual coach resemble yourself or someone you know? If so, why? (2) When creating your virtual coach, you were asked to select either a masculine or feminine agent. Please explain how you selected your virtual coach&#x2019;s gender. What was your thought process behind the selection? (3) Do you have any suggestions for how to improve the virtual coach?</p></sec></sec><sec id="s2-6"><title>Procedure</title><p>At the baseline assessment (Time 1), participants were first randomized to one of the 4 virtual agent conditions that varied in conversational and animation features. After providing written informed consent, they completed the mental health questionnaires (PHQ-8, PSS-10, and RRS). Users then downloaded and piloted the AirHeart app using TestFlight, a beta-testing app required for iPhones due to their additional security measures, while Android users could install the app directly. Next, they created an account, followed a tutorial to personalize their virtual agent, and completed the first CBT module. They used the app every other day for 2 weeks, which included a minimum of 8 times for full completion, but additional usage was encouraged. When participants logged into the app for the first time that day, they were prompted to complete the daily questionnaire, view their mood tracker, and then taken to the home page where they had access to the CBT modules. 
After the 2-week intervention, participants were contacted through email to complete postintervention questionnaires. At this assessment (Time 2), participants completed the mental health (PHQ-8, PSS-10, RRS) questionnaires again as well as the user experience questionnaire (MAUQ) and open-ended user experience questions.</p></sec><sec id="s2-7"><title>Data Analysis</title><p>To investigate H1, separate 2 (conversational status: present vs absent) &#x00D7; 2 (animation status: present vs absent) &#x00D7; 2 (time: baseline vs postintervention symptoms) mixed effects ANOVAs were used to analyze changes in depressive, stress, and rumination symptoms, respectively. Conversational status and animation status were between-subjects factors; time was a within-subjects repeated measures factor. Sensitivity analyses were conducted that focused only on participants who reported experiencing depressive symptoms (PHQ-8 scores &#x003E;4).</p><p>To assess H2 for the user experience predictions, separate 2 (conversational status: present vs absent) &#x00D7; 2 (animation status: present vs absent) &#x00D7; 2 (depressive status: depressive vs nondepressive state) multifactorial ANOVAs were performed for each of the 3 MAUQ subscales. Using the validated cutoff scores established in previous work [<xref ref-type="bibr" rid="ref48">48</xref>,<xref ref-type="bibr" rid="ref49">49</xref>], PHQ-8 scores ranging from 0&#x2010;4 were considered normal (or nondepressive) and scores of 5 and above were considered in a depressive state. Inclusion of this factor allowed for distinguishing whether individuals with and without current depressive symptoms had user experience preferences for the virtual agent characteristics.</p><p>For quantitative data, we conducted parametric ANOVA analyses after verifying that the data were normally distributed and error variances were equivalent [<xref ref-type="bibr" rid="ref54">54</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]. 
Box&#x2019;s test confirmed equality of covariance matrices, and Levene test verified homogeneity of variance. Mauchly test ensured sphericity. When appropriate, posthoc pairwise tests were conducted using the Tukey honestly significant difference for between-subjects variables and the Bonferroni adjusted alpha method for within-subjects variables. These methods are widely used in user studies and human factors research in computing [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref39">39</xref>,<xref ref-type="bibr" rid="ref56">56</xref>-<xref ref-type="bibr" rid="ref58">58</xref>]. For the open-ended qualitative questions, a reflexive thematic analysis was performed in accordance with the procedure specified by Braun and Clarke [<xref ref-type="bibr" rid="ref59">59</xref>], which has been used in numerous user studies evaluating virtual agents [<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref60">60</xref>-<xref ref-type="bibr" rid="ref63">63</xref>]. Two researchers independently reviewed deidentified responses, manually created initial codes, and then grouped codes into categories. For each of the 3 qualitative questions, percentage agreement for categories between researchers was &#x003E;85%. Researchers then reviewed the independently-generated categories, consolidated duplicates, and refined and labeled themes. Next, the study conditions (ie, conversational and animated) and depressive group (depressive vs nondepressive) were reattached to the responses to create a frequency data table.</p></sec><sec id="s2-8"><title>Ethical Considerations</title><p>The study procedures were approved by the Clemson University Institutional Review Board (IRB2021-0879) before procedures were implemented. All participants provided written informed consent before participating in the study. They were given the option to opt-out of participating. All data are deidentified. 
Participants were incentivized to participate with compensation in the form of course credit, extra credit, or a US $20 Amazon gift card, depending on their choice.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Mental Health Symptoms</title><sec id="s3-1-1"><title>Change in Depressive Symptoms</title><p>The 2 (conversational vs nonconversational) &#x00D7; 2 (animated vs nonanimated) &#x00D7; 2 (time: baseline vs postintervention) mixed ANOVA results demonstrated a statistically significant main effect of time (<italic>F</italic>(1, 205)=10.06, <italic>P</italic>=.002; <italic>&#x03B7;p</italic><sup>2</sup>=.05), indicating that depressive symptoms were lower at 2-week follow-up (mean 5.5, SD 4.86) compared with baseline (mean 6.35, SD 4.71) across all 4 experimental conditions. There was no significant main effect of animation condition (<italic>F</italic>(1, 208)=.02, <italic>P</italic>=.91; <italic>&#x03B7;p</italic><sup>2</sup>&#x003C;.001), conversational condition (<italic>F</italic> (1, 208)=.25, <italic>P</italic>=.62, <italic>&#x03B7;p</italic><sup>2</sup>=.001), nor any of the interaction effects (<italic>P</italic>s&#x003E;.05). <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref> shows the full ANOVA results.</p><p>We note that when the 2 (conversation: present vs absent) &#x00D7; 2 (animation: present vs absent) &#x00D7; 2 (time: pre vs post) analysis is performed separately for those that meet criteria of depressive symptoms at baseline (PHQ-8 scores&#x003E;4) and those that do not, the results do not differ. 
Thus, animation and conversation features do not significantly affect change in depressive symptoms for those with or without depressive symptoms.</p></sec><sec id="s3-1-2"><title>Change in Stress Symptoms</title><p>Mixed ANOVA results showed a significant main effect of time (<italic>F</italic>(1, 205)=8.09, <italic>P</italic>=.005; <italic>&#x03B7;p</italic><sup>2</sup>=.038), such that self-reported stress levels were lower at 2-week follow-up (mean 15.91, SD 7.67) than baseline (mean 17.02, SD 6.81) across all 4 experimental conditions. The animation condition (<italic>F</italic>(1, 208)=.007, <italic>P</italic>=.93; <italic>&#x03B7;p</italic><sup>2</sup>&#x003C;.001), conversational condition (<italic>F</italic>(1, 208)=.113, <italic>P</italic>=.74; <italic>&#x03B7;p</italic><sup>2</sup>=.001), and all interaction effects (<italic>P</italic>s&#x003E;.05) were nonsignificant (<xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>).</p></sec><sec id="s3-1-3"><title>Change in Rumination Symptoms</title><p>A main effect of time indicated that postintervention rumination scores were significantly lower after the 2-week intervention (mean 40.42, SD 12.96) when compared with the preintervention scores (mean 41.92, SD 13.61), (<italic>F</italic>(1, 205)=4.88, <italic>P</italic>=.03; <italic>&#x03B7;p</italic><sup>2</sup>=.023) across all 4 conditions. No significant effects were ascertained for animation condition (<italic>F</italic>(1, 208)=.09, <italic>P</italic>=.76; <italic>&#x03B7;p</italic><sup>2</sup>&#x003C;.001) nor the conversational condition (<italic>F</italic>(1, 208)=.37, <italic>P</italic>=.54; <italic>&#x03B7;p</italic><sup>2</sup>=.002). 
The interaction effect was also nonsignificant (<italic>Ps</italic>&#x003E;.05; <xref ref-type="supplementary-material" rid="app5">Multimedia Appendix 5</xref>).</p></sec></sec><sec id="s3-2"><title>User Experience Results</title><sec id="s3-2-1"><title>MAUQ-Ease of Use and Satisfaction</title><p>The ANOVA analysis on MAUQ-ease of use and satisfaction scores revealed a significant main effect of animation, <italic>F</italic>(1, 201)=102.60, <italic>P</italic>&#x003C;.001, <italic>&#x03B7;p</italic><sup>2</sup>=0.34. <xref ref-type="table" rid="table2">Table 2</xref> shows the mean (SD) values on the MAUQ-ease of use and satisfaction scores, and <xref ref-type="table" rid="table3">Table 3</xref> displays the full ANOVA results. The Tukey honestly significant difference posthoc pairwise comparisons indicated that mean MAUQ-ease of use and satisfaction scores was significantly higher when the agent was animated (mean 39.91, SD 9.51) as compared with when the agent was not (mean 23.35, SD 9.81; <italic>P&#x003C;</italic>.001).</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>mHealth App Usability Questionnaire scores for ease of use and satisfaction.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Variable</td><td align="left" valign="bottom">Score, mean (SD)</td></tr></thead><tbody><tr><td align="left" valign="top">Animated, n=106</td><td align="char" char="." valign="top">39.91 (9.51)</td></tr><tr><td align="left" valign="top">Nonanimated, n=103</td><td align="char" char="." valign="top">23.35 (9.81)</td></tr><tr><td align="left" valign="top">Conversational, n=105</td><td align="char" char="." valign="top">32.37 (12.53)</td></tr><tr><td align="left" valign="top">Nonconversational, n=104</td><td align="char" char="." valign="top">31.12 (12.93)</td></tr><tr><td align="left" valign="top">Depressed, n=46</td><td align="char" char="." 
valign="top">32.76 (12.82)</td></tr><tr><td align="left" valign="top">Not depressed, n=163</td><td align="char" char="." valign="top">31.46 (12.71)</td></tr></tbody></table></table-wrap><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Analysis of variance results for the mHealth App Usability Questionnaire-ease of use and satisfaction.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="top">Effect</td><td align="left" valign="top"><italic>F</italic> value</td><td align="left" valign="top"><italic>P</italic> value</td><td align="left" valign="top">Partial Eta squared (&#x03B7;p<sup>2</sup>)</td></tr></thead><tbody><tr><td align="left" valign="top">Conversational main effect</td><td align="left" valign="top">1.23</td><td align="left" valign="top">.27</td><td align="left" valign="top">.006</td></tr><tr><td align="left" valign="top">Animation main effect</td><td align="left" valign="top">102.60</td><td align="left" valign="top">&#x003C;.001</td><td align="left" valign="top">.34</td></tr><tr><td align="left" valign="top">Depressive status main effect</td><td align="left" valign="top">.86</td><td align="left" valign="top">.36</td><td align="left" valign="top">.004</td></tr><tr><td align="left" valign="top">Animated &#x00D7; conversational interaction effect</td><td align="left" valign="top">.32</td><td align="left" valign="top">.57</td><td align="left" valign="top">.002</td></tr><tr><td align="left" valign="top">Conversation &#x00D7; depressive status interaction effect</td><td align="left" valign="top">.024</td><td align="left" valign="top">.88</td><td align="left" valign="top">.0001</td></tr><tr><td align="left" valign="top">Animation &#x00D7; depressive status interaction effect</td><td align="left" valign="top">.024</td><td align="left" valign="top">.88</td><td align="left" valign="top">.0001</td></tr><tr><td align="left" valign="top">Conversation &#x00D7; animated &#x00D7; depressive status 
interaction effect</td><td align="left" valign="top">.54</td><td align="left" valign="top">.46</td><td align="left" valign="top">.003</td></tr></tbody></table></table-wrap></sec><sec id="s3-2-2"><title>MAUQ-System Information Arrangement</title><p>As presented in <xref ref-type="table" rid="table4">Table 4</xref> and <xref ref-type="table" rid="table5">Table 5</xref>, ANOVA results for the MAUQ-system information arrangement scores showed a significant main effect of animation, <italic>F</italic>(1, 201)=123.12, <italic>P</italic>&#x003C;.001, <italic>&#x03B7;p</italic><sup>2</sup>=.38. The mean MAUQ-system information arrangement scores was significantly higher (mean 30.97, SD 6.87) when the agent was animated as compared with when the agent was not (mean 17.27, SD 7.43; <italic>P</italic>&#x003C;.001).</p><table-wrap id="t4" position="float"><label>Table 4.</label><caption><p>mHealth App Usability Questionnaire scores for system information arrangement.</p></caption><table id="table4" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Variable</td><td align="left" valign="bottom">Score, mean (SD)</td></tr></thead><tbody><tr><td align="left" valign="bottom">Animated, n=106</td><td align="left" valign="bottom">30.97 (6.87)</td></tr><tr><td align="left" valign="top">Nonanimated, n=103</td><td align="char" char="." valign="top">17.27 (7.43)</td></tr><tr><td align="left" valign="top">Conversational, n=105</td><td align="char" char="." valign="top">24.35 (9.74)</td></tr><tr><td align="left" valign="top">Nonconversational, n=104</td><td align="char" char="." valign="top">24.09 (10.11)</td></tr><tr><td align="left" valign="top">Depressed, n=46</td><td align="char" char="." valign="top">23.43 (10)</td></tr><tr><td align="left" valign="top">Not depressed, n=163</td><td align="char" char="." 
valign="top">24.44 (9.89)</td></tr></tbody></table></table-wrap><table-wrap id="t5" position="float"><label>Table 5.</label><caption><p>Analysis of variance results for mHealth App Usability Questionnaire-system information arrangement.</p></caption><table id="table5" frame="hsides" rules="groups"><thead><tr><td align="left" valign="top">Effect</td><td align="left" valign="top"><italic>F</italic> value</td><td align="left" valign="top"><italic>P</italic> value</td><td align="left" valign="top">Partial Eta squared (&#x03B7;p<sup>2</sup>)</td></tr></thead><tbody><tr><td align="left" valign="top">Conversational main effect</td><td align="left" valign="top">.16</td><td align="left" valign="top">.69</td><td align="left" valign="top">.001</td></tr><tr><td align="left" valign="top">Animation main effect</td><td align="left" valign="top">123.12</td><td align="left" valign="top">&#x003C;.001</td><td align="left" valign="top">.38</td></tr><tr><td align="left" valign="top">Depressive status main effect</td><td align="left" valign="top">.44</td><td align="left" valign="top">.51</td><td align="left" valign="top">.002</td></tr><tr><td align="left" valign="top">Animated &#x00D7; conversational interaction effect</td><td align="left" valign="top">1.24</td><td align="left" valign="top">.27</td><td align="left" valign="top">.006</td></tr><tr><td align="left" valign="top">Conversation &#x00D7; depressive status interaction effect</td><td align="left" valign="top">.027</td><td align="left" valign="top">.87</td><td align="left" valign="top">.0001</td></tr><tr><td align="left" valign="top">Animation &#x00D7; depressive status interaction effect</td><td align="left" valign="top">.34</td><td align="left" valign="top">.56</td><td align="left" valign="top">.002</td></tr><tr><td align="left" valign="top">Conversation &#x00D7; animated &#x00D7; depressive status interaction effect</td><td align="left" valign="top">2.81</td><td align="left" valign="top">.096</td><td align="left" 
valign="top">.014</td></tr></tbody></table></table-wrap></sec><sec id="s3-2-3"><title>MAUQ-Usefulness</title><p>The ANOVA analysis on MAUQ-usefulness scores revealed a significant main effect of animation, <italic>F</italic>(1, 201)=3667.62, <italic>P</italic>&#x003C;.001, <italic>&#x03B7;p</italic><sup>2</sup>=.17, such that mean MAUQ-usefulness scores were significantly higher when the agent was animated (mean 32.21, SD 9.43) than when the agent was not animated (mean 22.17, SD 9.7; <italic>P</italic>&#x003C;.001; <xref ref-type="table" rid="table6">Table 6</xref> and <xref ref-type="table" rid="table7">Table 7</xref>).</p><table-wrap id="t6" position="float"><label>Table 6.</label><caption><p>mHealth App Usability Questionnaire scores for usefulness.</p></caption><table id="table6" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Variable</td><td align="left" valign="bottom">Score, mean (SD)</td></tr></thead><tbody><tr><td align="left" valign="bottom">Animated, n=106</td><td align="char" char="." valign="bottom">32.21 (9.43)</td></tr><tr><td align="left" valign="top">Nonanimated, n=103</td><td align="char" char="." valign="top">22.17 (9.7)</td></tr><tr><td align="left" valign="top">Conversational, n=105</td><td align="char" char="." valign="top">27.81 (10.28)</td></tr><tr><td align="left" valign="top">Nonconversational, n=104</td><td align="char" char="." valign="top">26.71 (11.29)</td></tr><tr><td align="left" valign="top">Depressed, n=46</td><td align="char" char="." valign="top">28.59 (11.85)</td></tr><tr><td align="left" valign="top">Not depressed, n=163</td><td align="char" char="." 
valign="top">26.89 (10.78)</td></tr></tbody></table></table-wrap><table-wrap id="t7" position="float"><label>Table 7.</label><caption><p>Analysis of variance results for the mHealth App Usability Questionnaire-usefulness.</p></caption><table id="table7" frame="hsides" rules="groups"><thead><tr><td align="left" valign="top">Effect</td><td align="left" valign="top"><italic>F</italic> value</td><td align="left" valign="top"><italic>P</italic> value</td><td align="left" valign="top">Partial Eta Squared (&#x03B7;p<sup>2</sup>)</td></tr></thead><tbody><tr><td align="left" valign="top">Conversational main effect</td><td align="left" valign="top">.69</td><td align="left" valign="top">.41</td><td align="left" valign="top">.003</td></tr><tr><td align="left" valign="top">Animation main effect</td><td align="left" valign="top">39.91</td><td align="left" valign="top">&#x003C;.001</td><td align="left" valign="top">.16</td></tr><tr><td align="left" valign="top">Depressive status main effect</td><td align="left" valign="top">1.40</td><td align="left" valign="top">.24</td><td align="left" valign="top">.007</td></tr><tr><td align="left" valign="top">Animated &#x00D7; conversational interaction effect</td><td align="left" valign="top">.85</td><td align="left" valign="top">.36</td><td align="left" valign="top">.004</td></tr><tr><td align="left" valign="top">Conversation &#x00D7; depressive status interaction effect</td><td align="left" valign="top">.001</td><td align="left" valign="top">.97</td><td align="left" valign="top">.000007</td></tr><tr><td align="left" valign="top">Animation &#x00D7; depressive status interaction effect</td><td align="left" valign="top">.002</td><td align="left" valign="top">.97</td><td align="left" valign="top">.000009</td></tr><tr><td align="left" valign="top">Conversation &#x00D7; animated &#x00D7; depressive status interaction effect</td><td align="left" valign="top">2.70</td><td align="left" valign="top">.10</td><td align="left" 
valign="top">.013</td></tr></tbody></table></table-wrap></sec></sec><sec id="s3-3"><title>Frequency Analysis for Agent Characteristic Selections</title><sec id="s3-3-1"><title>Agent Representativeness Selections</title><p>In total, 95 participants (45.5% of total sample) indicated that they designed the virtual agent to resemble themselves; of these participants, 55 (57.8%) were experiencing depressive symptoms, a proportion that did not differ significantly from those who were not experiencing depressive symptoms (<italic>z</italic>=1.54, <italic>P</italic>=.12). Seventy-seven participants (36.8% of total sample) reported that they designed the virtual agent to resemble someone they know, such as a friend, sibling, parent, or current or former therapist. Of these participants, 40 (51.9%) reported experiencing depressive symptoms (<italic>z</italic>=0.33, <italic>P</italic>=.74). The remaining 37 participants (17.7%) reported making the virtual agent resemble a celebrity (n=3), a doctor or professional (n=2), or did not have a specific reason for their virtual agent design (n=32).</p></sec><sec id="s3-3-2"><title>Agent Gender Selections</title><p>Of all participants, 84% (n=175) chose a female virtual agent, and 16% (n=34) chose a male. The majority of participants selected an agent&#x2019;s gender so that it aligned with their own gender: all but 3 female participants (98.2%) chose a female virtual agent, 31 of the 39 males (79.5%) selected a male virtual agent, and both nonbinary participants chose a female agent.</p></sec></sec><sec id="s3-4"><title>Qualitative Results</title><p>Participants were asked to explain the reason they selected the gender of their virtual agent. Responses were collected from all 209 participants, but 3 were excluded for failing to supply a usable response. Two key themes emerged: relatability (89/206, 43.2%) and trust or comfort in talking with a particular gender about one&#x2019;s mental health concerns (160/206; 77.7%); note that some participants listed both reasons. 
Example quotes to illustrate the relatability theme are listed below:</p><disp-quote><p>I chose a masculine agent because I was making a model of myself.</p><attrib>p#5</attrib></disp-quote><disp-quote><p>Female; I am also female.</p><attrib>p #116</attrib></disp-quote><disp-quote><p>I chose the same gender as mine to connect better with the therapist.</p><attrib>p #176</attrib></disp-quote><p>Quotes describing the comfortability preference with a particular gender are included below:</p><disp-quote><p>I selected a female therapist because I feel more comfortable talking to females about my problems. This is just my personal preference.</p><attrib>p #161</attrib></disp-quote><disp-quote><p>I selected female because I associate women with a more nurturing nature.</p><attrib>p #76</attrib></disp-quote><disp-quote><p>I chose a female because my previous therapist was female and it felt more comfortable.</p><attrib>p #67</attrib></disp-quote><p>Suggestions for improving the virtual agent were collected from all 209 participants, but 39 of them failed to provide a viable answer. The 170 responses resulted in 4 different themes: (1) robotic voice or interaction, (2) lack of personalization or customization, (3) more engagement or realism, and (4) technical issues. Similar to the previous free response question, <italic>z</italic> score proportion tests were conducted for the depressive and nondepressive participants in each category. The robotic voice/interaction (<italic>z</italic>=3.36, <italic>P</italic>&#x003C;.001) was the sole category to reach significance. 
A frequency data table was created to help visualize this information (<xref ref-type="table" rid="table8">Table 8</xref>).</p><table-wrap id="t8" position="float"><label>Table 8.</label><caption><p>Visualization of qualitative data: suggestions for virtual agent improvement.</p></caption><table id="table8" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Themes</td><td align="left" valign="bottom">Animation versus nonanimation</td><td align="left" valign="bottom">Conversation versus nonconversation</td><td align="left" valign="bottom">Examples</td><td align="left" valign="bottom">Depressive versus nondepressive</td></tr></thead><tbody><tr><td align="left" valign="top">Robotic voice or interaction</td><td align="left" valign="top">Animated:<break/>44/102<break/>Nonanimated:<break/>58/102</td><td align="left" valign="top">Conversational:<break/>53/102<break/>Nonconversational:<break/>49/102</td><td align="left" valign="top">&#x201C;Make it less robotic&#x201D; (p. 80)<break/>&#x201C;make the voice left stiff- sounds like a robot.&#x201D; (p. 101)<break/>&#x201C;Possibly make the voice more realistic and not as robotic&#x201D; (p. 177)</td><td align="left" valign="top">Depressive:<break/>63/102<break/>Nondepressive:<break/>39/102</td></tr><tr><td align="left" valign="top">More engagement, interaction, or connection</td><td align="left" valign="top">Animated:<break/>7/19<break/>Nonanimated:<break/>12/19</td><td align="left" valign="top">Conversational:<break/>9/19<break/>Nonconversational:<break/>10/19</td><td align="left" valign="top">&#x201C;It didn&#x2019;t really feel like we were having a conversation or that she was listening to my responses&#x201D; (p. 21)<break/>&#x201C;Maybe be more engaging then just talking.&#x201D; (p. 
59)</td><td align="left" valign="top">Depressive:<break/>11/19<break/>Nondepressive:<break/>8/19</td></tr><tr><td align="left" valign="top">Lack of personalization</td><td align="left" valign="top">Animated:<break/>23/41<break/>Nonanimated:<break/>18/41</td><td align="left" valign="top">Conversational:<break/>27/41<break/>Nonconversational:<break/>14/41</td><td align="left" valign="top">&#x201C;&#x2026; they did not change their answers based on whether or not I responded so it did not feel very real.&#x201D; (p. 83)<break/>&#x201C;It seemed very scripted, and like I was just typing into a box.&#x201D; (p. 150)</td><td align="left" valign="top">Depressive:<break/>23/41<break/>Nondepressive:<break/>18/41</td></tr><tr><td align="left" valign="top">Tech or user interface issues</td><td align="left" valign="top">Animated:<break/>8/16<break/>Nonanimated:<break/>8/16</td><td align="left" valign="top">Conversational:<break/>11/16<break/>Nonconversational:<break/>5/16</td><td align="left" valign="top">&#x201C;Map wasn&#x2019;t lining up&#x201D; (p. 54)<break/>&#x201C;I think there should be the opportunity to rewind what the therapist says. If I missed something I would have to restart the whole module and that is frustrating.&#x201D; (p. 139)</td><td align="left" valign="top">Depressive:<break/>10/16<break/>Nondepressive:<break/>6/16</td></tr><tr><td align="left" valign="top">No suggestions</td><td align="left" valign="top">Animated:<break/>21/38<break/>Nonanimated:<break/>17/38</td><td align="left" valign="top">Conversational:<break/>14/38<break/>Nonconversational:<break/>24/38</td><td align="left" valign="top">&#x201C;No.&#x201D; (p. 30)<break/>&#x201C;NA&#x201D; (p. 
89)</td><td align="left" valign="top">Depressive:<break/>20/38<break/>Nondepressive:<break/>18/38</td></tr></tbody></table></table-wrap></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>The current randomized controlled trial sought to investigate how conversational and animated components of a virtual agent within a bCBT-based mental health app might affect change in depressive symptoms and perceived user experience. Given that individuals experiencing depressive symptoms may have negative views of themselves or others and may struggle with anhedonia, low energy, amongst other symptoms [<xref ref-type="bibr" rid="ref64">64</xref>], it is reasonable that individuals experiencing depressive symptoms may have different intervention needs or preferences compared with those who are not experiencing such symptoms. The results demonstrated that bCBT delivered through a virtual agent within a mental health app significantly reduced symptoms of depression, stress and rumination over a 2-week period, regardless of whether the agent included conversational or animation features. Consequently, these results partially support H1. The animation feature did enhance user experience, while the conversation feature had no significant impact.</p><p>While several empirically-evaluated bCBT-based mental health apps like Woebot, Wysa, Tess, and Fido [<xref ref-type="bibr" rid="ref20">20</xref>-<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref65">65</xref>] include virtual agents, these existing mental health apps leverage a text-based chatbot design. Such design does not allow for animation features and certain conversational feature components like natural speech dynamics and nonverbal behaviors. 
In addition, while Tess displays a static picture of a smiling Caucasian female in the text-based chat dialogue box [<xref ref-type="bibr" rid="ref22">22</xref>], Woebot, Wysa, and Fido do not feature a human-like graphic and instead use animals or robots [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref65">65</xref>]. Furthermore, none of these apps feature a customizable virtual agent. In contrast, the AirHeart mental health app included a human-like, customizable virtual agent, and the conversational animated app condition featured both speech and text-based verbal capabilities, nonverbal behaviors, and dynamic animations.</p><p>Small pilot studies on virtual agent-based self-monitoring technologies have shown promise in demonstrating the feasibility and preliminary efficacy in reducing depressive symptoms [<xref ref-type="bibr" rid="ref66">66</xref>-<xref ref-type="bibr" rid="ref68">68</xref>]. The current study advances this work by demonstrating that virtual agent-based bCBT technology can effectively reduce depressive symptoms through a moderate-size randomized controlled trial. While conversational and animation features were expected to enhance the effectiveness of the intervention, particularly among those experiencing depressive symptoms, no added benefit of these features was observed on changes in depressive symptoms, stress, or rumination. Past work has shown that ECA-style virtual agents that mimic human-human interactions may enhance perceived empathy and working alliance with the user [<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref69">69</xref>]. The results of the present study suggest that conversational and animation features may not be critical for establishing a meaningful connection between the virtual agent and the user in the context of bCBT mental health apps for depression. 
Instead, the social presence of the human-like virtual agent alone may be sufficient.</p><p>Study results indicated that users in the animated agent conditions reported higher ratings for system information arrangement (MAUQ-system information arrangement), ease of use (MAUQ-ease of use and satisfaction), and usefulness (MAUQ-usefulness) compared with those in nonanimated conditions. There was no significant difference in conversational versus nonconversational conditions; therefore, H2 is partially supported. These results suggest that animation can enhance the user experience in mental health interventions, which aligns with previous research showing that the inclusion of both nonverbal behaviors can create more human-like interactions and improve user impressions in mental health contexts [<xref ref-type="bibr" rid="ref34">34</xref>,<xref ref-type="bibr" rid="ref35">35</xref>]. In addition, the inclusion of such animation design has previously demonstrated a strong connection to higher levels of agent acceptance, trust, credibility, and task appropriateness [<xref ref-type="bibr" rid="ref38">38</xref>]. These findings are crucial for developers of mental health interventions, as they underscore the importance of integrating virtual agents with natural animations, such as body, mouth, and gesture movements, to enhance user satisfaction and foster human-like interactions.</p><p>Consistent with the similarity-attraction effect [<xref ref-type="bibr" rid="ref24">24</xref>,<xref ref-type="bibr" rid="ref25">25</xref>], most participants (&#x003E;90%) selected an agent of the same gender as themselves and designed it to resemble themselves or someone familiar, such as a friend, family member, or therapist. This preference aligns with research showing that familiarity provides comfort, particularly during vulnerability [<xref ref-type="bibr" rid="ref70">70</xref>]. 
Many participants reported feeling more comfortable discussing mental health with females, citing greater relatability on emotional matters. This increased relatability may be more attributable to similarity than stereotypes of females as more emotionally aware and empathetic than men [<xref ref-type="bibr" rid="ref71">71</xref>-<xref ref-type="bibr" rid="ref73">73</xref>]. Indeed, while females often self-report higher empathy, a meta-analysis showed no objective gender differences in empathy [<xref ref-type="bibr" rid="ref74">74</xref>]. The findings support research demonstrating stronger therapeutic alliances when clients and counselors share the same gender, particularly among female clients [<xref ref-type="bibr" rid="ref75">75</xref>], and users&#x2019; preference for same-gender virtual agents [<xref ref-type="bibr" rid="ref76">76</xref>-<xref ref-type="bibr" rid="ref79">79</xref>]. In mental health contexts, gender synchrony has been shown to enhance trust in virtual agents, especially when paired with similar age [<xref ref-type="bibr" rid="ref79">79</xref>]. These results highlight the importance of virtual agent gender customization for relatability in mental health app design. However, past research suggests that developers often rely on stereotypical binary gender cues, which can reinforce societal gender expectations [<xref ref-type="bibr" rid="ref23">23</xref>]. Thus, mental health app developers and researchers should be cognizant of the limitations of stereotypical binary gender cues and enhance features that support diverse gender representation, especially in verbal and nonverbal animations.</p></sec><sec id="s4-2"><title>Limitations and Future Directions</title><p>The qualitative analysis revealed that most participants found the virtual agent&#x2019;s voice robotic and suggested improvements to voice quality. It is possible that the quality of the virtual agent&#x2019;s voice may have impacted the results of the conversational feature. 
The app used Amazon Web Services Polly Standard Voice (iOS) and RTVoice Native (Android) for TTS, both of which can sound synthetic, similar to Siri (Apple) or Google Assistant (Android). Previous research has shown that synthetic, artificial voices induce an eerie feeling [<xref ref-type="bibr" rid="ref80">80</xref>,<xref ref-type="bibr" rid="ref81">81</xref>], and similar results were found using a TTS agent for CBT-based emotional regulation, where participants also noted the robotic speech [<xref ref-type="bibr" rid="ref82">82</xref>]. Future studies should explore higher-quality TTS or prerecorded human voices to enhance user interactions with the virtual agent.</p><p>Furthermore, the study included pre- and 2-week postintervention measurements, but long-term follow-ups to assess whether the effects of the intervention are sustained over time were not included in the study design. Additional research is needed to determine the duration of the benefits from the virtual agent-delivered bCBT mental health intervention following the conclusion of app use.</p></sec><sec id="s4-3"><title>Conclusions</title><p>This study is among the first to compare the effectiveness and user experience of a virtual agent bCBT-based mental health app in both users with and without depressive symptoms. The key findings from the study demonstrated that the app intervention was effective in reducing mental health symptoms, regardless of whether the agent included conversational or animation features, but animation features enhanced user experience. These effects were observed in both users with and without depressive symptoms. This work suggests that college students experiencing depressive symptoms may not have unique user experience requirements in mental health apps, and such findings may apply more broadly to wellness apps. 
The finding that virtual agent animation improves user experience in mental health apps but does not affect the intervention&#x2019;s effectiveness offers valuable insight for optimizing app design, which can help guide future development of digital mental health tools that are both effective and user-friendly.</p></sec></sec></body><back><notes><sec><title>Data Availability</title><p>The data will be available on the Open Science Framework upon publication acceptance.</p></sec></notes><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">bCBT</term><def><p>brief cognitive behavioral therapy</p></def></def-item><def-item><term id="abb2">CBT</term><def><p>cognitive behavioral therapy</p></def></def-item><def-item><term id="abb3">ECA</term><def><p>embodied conversational agent</p></def></def-item><def-item><term id="abb4">MAUQ</term><def><p>mHealth App Usability Questionnaire</p></def></def-item><def-item><term id="abb5">MHealth apps</term><def><p>mental health applications</p></def></def-item><def-item><term id="abb6">TTS</term><def><p>text-to-speech</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="web"><source>Major depression</source><access-date>2025-03-23</access-date><publisher-name>National Institute of Mental Health</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://www.nimh.nih.gov/health/statistics/major-depression">https://www.nimh.nih.gov/health/statistics/major-depression</ext-link></comment></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="web"><article-title>COVID-19 pandemic triggers 25% increase in prevalence of anxiety and depression worldwide</article-title><source>World Health Organization</source><year>2022</year><month>03</month><day>2</day><access-date>2024-04-05</access-date><publisher-name>World Health 
Organization</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://www.who.int/news/item/02-03-2022-covid-19-pandemic-triggers-25-increase-in-prevalence-of-anxiety-and-depression-worldwide">https://www.who.int/news/item/02-03-2022-covid-19-pandemic-triggers-25-increase-in-prevalence-of-anxiety-and-depression-worldwide</ext-link></comment></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Purkayastha</surname><given-names>S</given-names> </name><name name-style="western"><surname>Addepally</surname><given-names>SA</given-names> </name><name name-style="western"><surname>Bucher</surname><given-names>S</given-names> </name></person-group><article-title>Engagement and usability of a cognitive behavioral therapy mobile app compared with web-based cognitive behavioral therapy among college students: randomized heuristic trial</article-title><source>JMIR Hum Factors</source><year>2020</year><month>02</month><day>3</day><volume>7</volume><issue>1</issue><fpage>e14146</fpage><pub-id pub-id-type="doi">10.2196/14146</pub-id><pub-id pub-id-type="medline">32012043</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="web"><article-title>A provider&#x2019;s guide to brief CBT | south central MIRECC</article-title><source>US Department of Veterans Affairs</source><access-date>2025-03-23</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.mirecc.va.gov/visn16/guide-to-brief-cbt-manual.asp">https://www.mirecc.va.gov/visn16/guide-to-brief-cbt-manual.asp</ext-link></comment></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Turner</surname><given-names>A</given-names> </name><name name-style="western"><surname>Hambridge</surname><given-names>J</given-names> </name><name 
name-style="western"><surname>Baker</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bowman</surname><given-names>J</given-names> </name><name name-style="western"><surname>McElduff</surname><given-names>P</given-names> </name></person-group><article-title>Randomised controlled trial of group cognitive behaviour therapy versus brief intervention for depression in cardiac patients</article-title><source>Aust N Z J Psychiatry</source><year>2013</year><month>03</month><volume>47</volume><issue>3</issue><fpage>235</fpage><lpage>243</lpage><pub-id pub-id-type="doi">10.1177/0004867412460592</pub-id><pub-id pub-id-type="medline">23015750</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Six</surname><given-names>SG</given-names> </name><name name-style="western"><surname>Byrne</surname><given-names>KA</given-names> </name><name name-style="western"><surname>Aly</surname><given-names>H</given-names> </name><name name-style="western"><surname>Harris</surname><given-names>MW</given-names> </name></person-group><article-title>The effect of mental health app customization on depressive symptoms in college students: randomized controlled trial</article-title><source>JMIR Ment Health</source><year>2022</year><month>08</month><day>9</day><volume>9</volume><issue>8</issue><fpage>e39516</fpage><pub-id pub-id-type="doi">10.2196/39516</pub-id><pub-id pub-id-type="medline">35943788</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Atik</surname><given-names>E</given-names> </name><name name-style="western"><surname>Stricker</surname><given-names>J</given-names> </name><name name-style="western"><surname>Sch&#x00FC;ckes</surname><given-names>M</given-names> </name><name 
name-style="western"><surname>Pittig</surname><given-names>A</given-names> </name></person-group><article-title>Efficacy of a brief blended cognitive behavioral therapy program for the treatment of depression and anxiety in university students: uncontrolled intervention study</article-title><source>JMIR Ment Health</source><year>2023</year><month>08</month><day>25</day><volume>10</volume><fpage>e44742</fpage><pub-id pub-id-type="doi">10.2196/44742</pub-id><pub-id pub-id-type="medline">37624631</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Richards</surname><given-names>D</given-names> </name><name name-style="western"><surname>Richardson</surname><given-names>T</given-names> </name></person-group><article-title>Computer-based psychological treatments for depression: a systematic review and meta-analysis</article-title><source>Clin Psychol Rev</source><year>2012</year><month>06</month><volume>32</volume><issue>4</issue><fpage>329</fpage><lpage>342</lpage><pub-id pub-id-type="doi">10.1016/j.cpr.2012.02.004</pub-id><pub-id pub-id-type="medline">22466510</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Smith</surname><given-names>P</given-names> </name><name name-style="western"><surname>Scott</surname><given-names>R</given-names> </name><name name-style="western"><surname>Eshkevari</surname><given-names>E</given-names> </name><etal/></person-group><article-title>Computerised CBT for depressed adolescents: randomised controlled trial</article-title><source>Behav Res Ther</source><year>2015</year><month>10</month><volume>73</volume><fpage>104</fpage><lpage>110</lpage><pub-id pub-id-type="doi">10.1016/j.brat.2015.07.009</pub-id><pub-id pub-id-type="medline">26301756</pub-id></nlm-citation></ref><ref 
id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bakker</surname><given-names>D</given-names> </name><name name-style="western"><surname>Kazantzis</surname><given-names>N</given-names> </name><name name-style="western"><surname>Rickwood</surname><given-names>D</given-names> </name><name name-style="western"><surname>Rickard</surname><given-names>N</given-names> </name></person-group><article-title>A randomized controlled trial of three smartphone apps for enhancing public mental health</article-title><source>Behav Res Ther</source><year>2018</year><month>10</month><volume>109</volume><fpage>75</fpage><lpage>83</lpage><pub-id pub-id-type="doi">10.1016/j.brat.2018.08.003</pub-id><pub-id pub-id-type="medline">30125790</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Moberg</surname><given-names>C</given-names> </name><name name-style="western"><surname>Niles</surname><given-names>A</given-names> </name><name name-style="western"><surname>Beermann</surname><given-names>D</given-names> </name></person-group><article-title>Guided self-help works: randomized waitlist controlled trial of Pacifica, a mobile app integrating cognitive behavioral therapy and mindfulness for stress, anxiety, and depression</article-title><source>J Med Internet Res</source><year>2019</year><month>06</month><day>8</day><volume>21</volume><issue>6</issue><fpage>e12556</fpage><pub-id pub-id-type="doi">10.2196/12556</pub-id><pub-id pub-id-type="medline">31199319</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Roepke</surname><given-names>AM</given-names> </name><name name-style="western"><surname>Jaffee</surname><given-names>SR</given-names> </name><name 
name-style="western"><surname>Riffle</surname><given-names>OM</given-names> </name><name name-style="western"><surname>McGonigal</surname><given-names>J</given-names> </name><name name-style="western"><surname>Broome</surname><given-names>R</given-names> </name><name name-style="western"><surname>Maxwell</surname><given-names>B</given-names> </name></person-group><article-title>Randomized controlled trial of SuperBetter, a smartphone-based/internet-based self-help tool to reduce depressive symptoms</article-title><source>Games Health J</source><year>2015</year><month>06</month><volume>4</volume><issue>3</issue><fpage>235</fpage><lpage>246</lpage><pub-id pub-id-type="doi">10.1089/g4h.2014.0046</pub-id><pub-id pub-id-type="medline">26182069</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Darcy</surname><given-names>A</given-names> </name><name name-style="western"><surname>Daniels</surname><given-names>J</given-names> </name><name name-style="western"><surname>Salinger</surname><given-names>D</given-names> </name><name name-style="western"><surname>Wicks</surname><given-names>P</given-names> </name><name name-style="western"><surname>Robinson</surname><given-names>A</given-names> </name></person-group><article-title>Evidence of human-level bonds established with a digital conversational agent: cross-sectional, retrospective observational study</article-title><source>JMIR Form Res</source><year>2021</year><month>05</month><day>11</day><volume>5</volume><issue>5</issue><fpage>e27868</fpage><pub-id pub-id-type="doi">10.2196/27868</pub-id><pub-id pub-id-type="medline">33973854</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Bickmore</surname><given-names>T</given-names> </name><name 
name-style="western"><surname>Cassell</surname><given-names>J</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>van Kuppevelt</surname><given-names>JCJ</given-names> </name><name name-style="western"><surname>Dybkj&#x00E6;r</surname><given-names>L</given-names> </name><name name-style="western"><surname>Bernsen</surname><given-names>NO</given-names> </name></person-group><article-title>Social dialogue with embodied conversational agents</article-title><source>Advances in Natural Multimodal Dialogue Systems Vol 30 Text, Speech and Language Technology</source><year>2005</year><volume>30</volume><publisher-name>Springer</publisher-name><fpage>23</fpage><lpage>54</lpage><pub-id pub-id-type="doi">10.1007/1-4020-3933-6_2</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Cassell</surname><given-names>J</given-names> </name><name name-style="western"><surname>Bickmore</surname><given-names>T</given-names> </name><name name-style="western"><surname>Campbell</surname><given-names>L</given-names> </name><name name-style="western"><surname>Vilhj&#x00E1;lmsson</surname><given-names>H</given-names> </name><name name-style="western"><surname>Yan</surname><given-names>H</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Cassell</surname><given-names>J</given-names> </name><name name-style="western"><surname>Sullivan</surname><given-names>J</given-names> </name><name name-style="western"><surname>Prevost</surname><given-names>S</given-names> </name><name name-style="western"><surname>Churchill</surname><given-names>EF</given-names> </name></person-group><article-title>Human conversation as a system framework: designing embodied conversational agents</article-title><source>Embodied Conversational Agents</source><year>2000</year><publisher-name>The MIT 
Press</publisher-name><fpage>29</fpage><lpage>63</lpage><pub-id pub-id-type="doi">10.7551/mitpress/2697.003.0004</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Balakrishnan</surname><given-names>K</given-names> </name><name name-style="western"><surname>Honavar</surname><given-names>V</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Patel</surname><given-names>M</given-names> </name><name name-style="western"><surname>Honavar</surname><given-names>V</given-names> </name><name name-style="western"><surname>Balakrishnan</surname><given-names>K</given-names> </name></person-group><article-title>Evolutionary and neural synthesis of intelligent agents</article-title><source>Advances in the Evolutionary Synthesis of Intelligent Agents</source><year>2001</year><publisher-name>The MIT Press</publisher-name><fpage>1</fpage><lpage>28</lpage><pub-id pub-id-type="doi">10.7551/mitpress/1129.003.0003</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Bertrand</surname><given-names>J</given-names> </name><name name-style="western"><surname>Babu</surname><given-names>SV</given-names> </name><name name-style="western"><surname>Polgreen</surname><given-names>P</given-names> </name><name name-style="western"><surname>Segre</surname><given-names>A</given-names> </name></person-group><article-title>Virtual agents-based simulation for training healthcare workers in hand hygiene procedures</article-title><source>Intelligent Virtual Agents IVA 2010 Lecture Notes in Computer Science</source><year>2010</year><pub-id pub-id-type="doi">10.1007/978-3-642-15892-6_13</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Laranjo</surname><given-names>L</given-names> </name><name name-style="western"><surname>Dunn</surname><given-names>AG</given-names> </name><name name-style="western"><surname>Tong</surname><given-names>HL</given-names> </name><etal/></person-group><article-title>Conversational agents in healthcare: a systematic review</article-title><source>J Am Med Inform Assoc</source><year>2018</year><month>09</month><day>1</day><volume>25</volume><issue>9</issue><fpage>1248</fpage><lpage>1258</lpage><pub-id pub-id-type="doi">10.1093/jamia/ocy072</pub-id><pub-id pub-id-type="medline">30010941</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Weizenbaum</surname><given-names>J</given-names> </name></person-group><source>Computer Power and Human Reason: From Judgment to Calculation</source><year>1976</year><publisher-name>W H Freeman and Company</publisher-name><pub-id pub-id-type="other">0716704641</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fitzpatrick</surname><given-names>KK</given-names> </name><name name-style="western"><surname>Darcy</surname><given-names>A</given-names> </name><name name-style="western"><surname>Vierhile</surname><given-names>M</given-names> </name></person-group><article-title>Delivering cognitive behavior therapy to young adults with symptoms of depression and anxiety using a fully automated conversational agent (Woebot): a randomized controlled trial</article-title><source>JMIR Ment Health</source><year>2017</year><month>06</month><day>6</day><volume>4</volume><issue>2</issue><fpage>e19</fpage><pub-id pub-id-type="doi">10.2196/mental.7785</pub-id><pub-id pub-id-type="medline">28588005</pub-id></nlm-citation></ref><ref 
id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Inkster</surname><given-names>B</given-names> </name><name name-style="western"><surname>Sarda</surname><given-names>S</given-names> </name><name name-style="western"><surname>Subramanian</surname><given-names>V</given-names> </name></person-group><article-title>An empathy-driven, conversational artificial intelligence agent (Wysa) for digital mental well-being: real-world data evaluation mixed-methods study</article-title><source>JMIR Mhealth Uhealth</source><year>2018</year><month>11</month><day>23</day><volume>6</volume><issue>11</issue><fpage>e12106</fpage><pub-id pub-id-type="doi">10.2196/12106</pub-id><pub-id pub-id-type="medline">30470676</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fulmer</surname><given-names>R</given-names> </name><name name-style="western"><surname>Joerin</surname><given-names>A</given-names> </name><name name-style="western"><surname>Gentile</surname><given-names>B</given-names> </name><name name-style="western"><surname>Lakerink</surname><given-names>L</given-names> </name><name name-style="western"><surname>Rauws</surname><given-names>M</given-names> </name></person-group><article-title>Using psychological artificial intelligence (Tess) to relieve symptoms of depression and anxiety: randomized controlled trial</article-title><source>JMIR Ment Health</source><year>2018</year><month>12</month><day>13</day><volume>5</volume><issue>4</issue><fpage>e64</fpage><pub-id pub-id-type="doi">10.2196/mental.9782</pub-id><pub-id pub-id-type="medline">30545815</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ghosh</surname><given-names>R</given-names> 
</name><name name-style="western"><surname>Feij&#x00F3;o-Garc&#x00ED;a</surname><given-names>PG</given-names> </name><name name-style="western"><surname>Stuart</surname><given-names>J</given-names> </name><name name-style="western"><surname>Wrenn</surname><given-names>C</given-names> </name><name name-style="western"><surname>Lok</surname><given-names>B</given-names> </name></person-group><article-title>Evaluating face gender cues in virtual humans within and beyond the gender binary</article-title><source>Front Virtual Real</source><year>2023</year><volume>4</volume><fpage>1251420</fpage><pub-id pub-id-type="doi">10.3389/frvir.2023.1251420</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Bernier</surname><given-names>EP</given-names> </name><name name-style="western"><surname>Scassellati</surname><given-names>B</given-names> </name></person-group><article-title>The similarity-attraction effect in human-robot interaction</article-title><source>2010 IEEE 9th International Conference on Development and Learning</source><year>2010</year><publisher-name>IEEE</publisher-name><fpage>286</fpage><lpage>290</lpage><pub-id pub-id-type="doi">10.1109/DEVLRN.2010.5578828</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Feij&#x00F3;o-Garc&#x00ED;a</surname><given-names>PG</given-names> </name><name name-style="western"><surname>Wrenn</surname><given-names>C</given-names> </name><name name-style="western"><surname>Stuart</surname><given-names>J</given-names> </name><name name-style="western"><surname>De Siqueira</surname><given-names>AG</given-names> </name><name name-style="western"><surname>Lok</surname><given-names>B</given-names> </name></person-group><article-title>Participatory design of virtual humans for mental health 
support among North American computer science students: voice, appearance, and the similarity-attraction effect</article-title><source>ACM Trans Appl Percept</source><year>2023</year><month>07</month><day>31</day><volume>20</volume><issue>3</issue><fpage>1</fpage><lpage>27</lpage><pub-id pub-id-type="doi">10.1145/3613961</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Feij&#x00F3;o-Garc&#x00ED;a</surname><given-names>PG</given-names> </name><name name-style="western"><surname>Wrenn</surname><given-names>C</given-names> </name><name name-style="western"><surname>Gomes de Siqueira</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Exploring the effects of user-agent and user-designer similarity in virtual human design to promote mental health intentions for college students</article-title><source>ACM Trans Appl Percept</source><year>2025</year><month>01</month><day>31</day><volume>22</volume><issue>1</issue><fpage>1</fpage><lpage>41</lpage><pub-id pub-id-type="doi">10.1145/3689822</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Babu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Schmugge</surname><given-names>S</given-names> </name><name name-style="western"><surname>Inugala</surname><given-names>R</given-names> </name><name name-style="western"><surname>Rao</surname><given-names>S</given-names> </name><name name-style="western"><surname>Barnes</surname><given-names>T</given-names> </name><name name-style="western"><surname>Hodges</surname><given-names>LF</given-names> </name></person-group><article-title>Marve: A prototype virtual human interface framework for studying human-virtual human interaction</article-title><source>Intelligent Virtual Agents 
IVA 2005 Lecture Notes in Computer Science</source><year>2005</year><volume>3661</volume><publisher-name>Springer</publisher-name><fpage>120</fpage><lpage>133</lpage><pub-id pub-id-type="doi">10.1007/11550617_11</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Babu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Schmugge</surname><given-names>S</given-names> </name><name name-style="western"><surname>Barnes</surname><given-names>T</given-names> </name><name name-style="western"><surname>Hodges</surname><given-names>LF</given-names> </name></person-group><article-title>&#x201C;What would you like to talk about?&#x201D; an evaluation of social conversations with a virtual receptionist</article-title><source>Intelligent Virtual Agents IVA 2006 Lecture Notes in Computer Science</source><year>2006</year><publisher-name>Springer</publisher-name><fpage>169</fpage><lpage>180</lpage><pub-id pub-id-type="doi">10.1007/11821830_14</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cassell</surname><given-names>J</given-names> </name><name name-style="western"><surname>Bickmore</surname><given-names>T</given-names> </name></person-group><article-title>External manifestations of trustworthiness in the interface</article-title><source>Commun ACM</source><year>2000</year><month>12</month><volume>43</volume><issue>12</issue><fpage>50</fpage><lpage>56</lpage><pub-id pub-id-type="doi">10.1145/355112.355123</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>Q</given-names> </name><name 
name-style="western"><surname>Luximon</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>J</given-names> </name></person-group><article-title>The influence of anthropomorphic cues on patients&#x2019; perceived anthropomorphism, social presence, trust building, and acceptance of health care conversational agents: within-subject web-based experiment</article-title><source>J Med Internet Res</source><year>2023</year><month>08</month><day>10</day><volume>25</volume><fpage>e44479</fpage><pub-id pub-id-type="doi">10.2196/44479</pub-id><pub-id pub-id-type="medline">37561567</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rheu</surname><given-names>M</given-names> </name><name name-style="western"><surname>Shin</surname><given-names>JY</given-names> </name><name name-style="western"><surname>Peng</surname><given-names>W</given-names> </name><name name-style="western"><surname>Huh-Yoo</surname><given-names>J</given-names> </name></person-group><article-title>Systematic review: trust-building factors and implications for conversational agent design</article-title><source>International Journal of Human&#x2013;Computer Interaction</source><year>2021</year><month>01</month><day>2</day><volume>37</volume><issue>1</issue><fpage>81</fpage><lpage>96</lpage><pub-id pub-id-type="doi">10.1080/10447318.2020.1807710</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gaffney</surname><given-names>H</given-names> </name><name name-style="western"><surname>Mansell</surname><given-names>W</given-names> </name><name name-style="western"><surname>Tai</surname><given-names>S</given-names> </name></person-group><article-title>Conversational agents in the treatment of mental health 
problems: mixed-method systematic review</article-title><source>JMIR Ment Health</source><year>2019</year><month>10</month><day>18</day><volume>6</volume><issue>10</issue><fpage>e14166</fpage><pub-id pub-id-type="doi">10.2196/14166</pub-id><pub-id pub-id-type="medline">31628789</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Frischen</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bayliss</surname><given-names>AP</given-names> </name><name name-style="western"><surname>Tipper</surname><given-names>SP</given-names> </name></person-group><article-title>Gaze cueing of attention: visual attention, social cognition, and individual differences</article-title><source>Psychol Bull</source><year>2007</year><month>07</month><volume>133</volume><issue>4</issue><fpage>694</fpage><lpage>724</lpage><pub-id pub-id-type="doi">10.1037/0033-2909.133.4.694</pub-id><pub-id pub-id-type="medline">17592962</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="book"><article-title>Rapport between humans and socially interactive agents</article-title><source>The Handbook on Socially Interactive Agents: 20 Years of Research on Embodied Conversational Agents, Intelligent Virtual Agents, and Social Robotics, Volume 1: Methods, Behavior, Cognition</source><year>2021</year><fpage>433</fpage><lpage>462</lpage><pub-id pub-id-type="doi">10.1145/3477322.3477335</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>DeVault</surname><given-names>D</given-names> </name><name name-style="western"><surname>Artstein</surname><given-names>R</given-names> </name><name name-style="western"><surname>Benn</surname><given-names>G</given-names> </name><etal/></person-group><article-title>Simsensei 
kiosk: A virtual human interviewer for healthcare decision support</article-title><source>Proceedings of the 2014 International Conference on Autonomous Agents and Multi-Agent Systems</source><year>2014</year><publisher-name>International Foundation for Autonomous Agents and Multiagent Systems</publisher-name><fpage>1061</fpage><lpage>1068</lpage><comment><ext-link ext-link-type="uri" xlink:href="http://dl.acm.org/citation.cfm?id=2617388.2617415">http://dl.acm.org/citation.cfm?id=2617388.2617415</ext-link></comment></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ekman</surname><given-names>P</given-names> </name></person-group><article-title>Facial expression and emotion</article-title><source>American Psychologist</source><year>1993</year><volume>48</volume><issue>4</issue><fpage>384</fpage><lpage>392</lpage><pub-id pub-id-type="doi">10.1037/0003-066X.48.4.384</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hyde</surname><given-names>J</given-names> </name><name name-style="western"><surname>Carter</surname><given-names>EJ</given-names> </name><name name-style="western"><surname>Kiesler</surname><given-names>S</given-names> </name><name name-style="western"><surname>Hodgins</surname><given-names>JK</given-names> </name></person-group><article-title>Evaluating animated characters: facial motion magnitude influences personality perceptions</article-title><source>ACM Trans Appl Percept</source><year>2016</year><volume>13</volume><issue>2</issue><fpage>1</fpage><lpage>1</lpage><pub-id pub-id-type="doi">10.1145/2851499</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Parmar</surname><given-names>D</given-names> </name><name name-style="western"><surname>Olafsson</surname><given-names>S</given-names> </name><name name-style="western"><surname>Utami</surname><given-names>D</given-names> </name><name name-style="western"><surname>Murali</surname><given-names>P</given-names> </name><name name-style="western"><surname>Bickmore</surname><given-names>T</given-names> </name></person-group><article-title>Designing empathic virtual agents: manipulating animation, voice, rendering, and empathy to create persuasive agents</article-title><source>Auton Agent Multi Agent Syst</source><year>2022</year><month>04</month><volume>36</volume><issue>1</issue><fpage>17</fpage><pub-id pub-id-type="doi">10.1007/s10458-021-09539-1</pub-id><pub-id pub-id-type="medline">35387204</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Babu</surname><given-names>SV</given-names> </name><name name-style="western"><surname>Armstrong</surname><given-names>R</given-names> </name><etal/></person-group><article-title>Effects of virtual human animation on emotion contagion in simulated inter-personal experiences</article-title><source>IEEE Trans Visual Comput Graphics</source><year>2014</year><volume>20</volume><issue>4</issue><fpage>626</fpage><lpage>635</lpage><pub-id pub-id-type="doi">10.1109/TVCG.2014.19</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Volonte</surname><given-names>M</given-names> </name><name name-style="western"><surname>Robb</surname><given-names>A</given-names> </name><name name-style="western"><surname>Duchowski</surname><given-names>AT</given-names> </name><name name-style="western"><surname>Babu</surname><given-names>SV</given-names> 
</name></person-group><article-title>Empirical evaluation of virtual human conversational and affective animations on visual attention in inter-personal simulations</article-title><source>2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)</source><publisher-name>IEEE</publisher-name><fpage>25</fpage><lpage>32</lpage><pub-id pub-id-type="doi">10.1109/VR.2018.8446364</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>I</given-names> </name><name name-style="western"><surname>Ruiz</surname><given-names>J</given-names> </name></person-group><article-title>Examining the use of nonverbal communication in virtual agents</article-title><source>International Journal of Human&#x2013;Computer Interaction</source><year>2021</year><month>10</month><day>21</day><volume>37</volume><issue>17</issue><fpage>1648</fpage><lpage>1673</lpage><pub-id pub-id-type="doi">10.1080/10447318.2021.1898851</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Malin</surname><given-names>AJ</given-names> </name><name name-style="western"><surname>Pos</surname><given-names>AE</given-names> </name></person-group><article-title>The impact of early empathy on alliance building, emotional processing, and outcome during experiential treatment of depression</article-title><source>Psychother Res</source><year>2015</year><volume>25</volume><issue>4</issue><fpage>445</fpage><lpage>459</lpage><pub-id pub-id-type="doi">10.1080/10503307.2014.901572</pub-id><pub-id pub-id-type="medline">24801633</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Falkenstr&#x00F6;m</surname><given-names>F</given-names> </name><name name-style="western"><surname>Ekeblad</surname><given-names>A</given-names> </name><name name-style="western"><surname>Holmqvist</surname><given-names>R</given-names> </name></person-group><article-title>Improvement of the working alliance in one treatment session predicts improvement of depressive symptoms by the next session</article-title><source>J Consult Clin Psychol</source><year>2016</year><volume>84</volume><issue>8</issue><fpage>738</fpage><lpage>751</lpage><pub-id pub-id-type="doi">10.1037/ccp0000119</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pos</surname><given-names>AE</given-names> </name><name name-style="western"><surname>Greenberg</surname><given-names>LS</given-names> </name><name name-style="western"><surname>Goldman</surname><given-names>RN</given-names> </name><name name-style="western"><surname>Korman</surname><given-names>LM</given-names> </name></person-group><article-title>Emotional processing during experiential treatment of depression</article-title><source>J Consult Clin Psychol</source><year>2003</year><month>12</month><volume>71</volume><issue>6</issue><fpage>1007</fpage><lpage>1016</lpage><pub-id pub-id-type="doi">10.1037/0022-006X.71.6.1007</pub-id><pub-id pub-id-type="medline">14622076</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bickmore</surname><given-names>TW</given-names> </name><name name-style="western"><surname>Mitchell</surname><given-names>SE</given-names> </name><name name-style="western"><surname>Jack</surname><given-names>BW</given-names> </name><name name-style="western"><surname>Paasche-Orlow</surname><given-names>MK</given-names> </name><name 
name-style="western"><surname>Pfeifer</surname><given-names>LM</given-names> </name><name name-style="western"><surname>Odonnell</surname><given-names>J</given-names> </name></person-group><article-title>Response to a relational agent by hospital patients with depressive symptoms</article-title><source>Interact Comput</source><year>2010</year><month>07</month><day>1</day><volume>22</volume><issue>4</issue><fpage>289</fpage><lpage>298</lpage><pub-id pub-id-type="doi">10.1016/j.intcom.2009.12.001</pub-id><pub-id pub-id-type="medline">20628581</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Beckham</surname><given-names>EE</given-names> </name><name name-style="western"><surname>Leber</surname><given-names>WR</given-names> </name><name name-style="western"><surname>Watkins</surname><given-names>JT</given-names> </name><name name-style="western"><surname>Boyer</surname><given-names>JL</given-names> </name><name name-style="western"><surname>Cook</surname><given-names>JB</given-names> </name></person-group><article-title>Development of an instrument to measure Beck&#x2019;s cognitive triad: the Cognitive Triad Inventory</article-title><source>J Consult Clin Psychol</source><year>1986</year><month>08</month><volume>54</volume><issue>4</issue><fpage>566</fpage><lpage>567</lpage><pub-id pub-id-type="doi">10.1037//0022-006x.54.4.566</pub-id><pub-id pub-id-type="medline">3745613</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Six</surname><given-names>SG</given-names> </name><name name-style="western"><surname>Byrne</surname><given-names>KA</given-names> </name><name name-style="western"><surname>Tibbett</surname><given-names>TP</given-names> </name><name 
name-style="western"><surname>Pericot-Valverde</surname><given-names>I</given-names> </name></person-group><article-title>Examining the effectiveness of gamification in mental health apps for depression: systematic review and meta-analysis</article-title><source>JMIR Ment Health</source><year>2021</year><month>11</month><day>29</day><volume>8</volume><issue>11</issue><fpage>e32199</fpage><pub-id pub-id-type="doi">10.2196/32199</pub-id><pub-id pub-id-type="medline">34847058</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kroenke</surname><given-names>K</given-names> </name><name name-style="western"><surname>Spitzer</surname><given-names>RL</given-names> </name><name name-style="western"><surname>Williams</surname><given-names>JBW</given-names> </name></person-group><article-title>The PHQ-9: validity of a brief depression severity measure</article-title><source>J Gen Intern Med</source><year>2001</year><month>09</month><volume>16</volume><issue>9</issue><fpage>606</fpage><lpage>613</lpage><pub-id pub-id-type="doi">10.1046/j.1525-1497.2001.016009606.x</pub-id><pub-id pub-id-type="medline">11556941</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Levis</surname><given-names>B</given-names> </name><name name-style="western"><surname>Riehm</surname><given-names>KE</given-names> </name><etal/></person-group><article-title>Equivalency of the diagnostic accuracy of the PHQ-8 and PHQ-9: a systematic review and individual participant data meta-analysis</article-title><source>Psychol Med</source><year>2020</year><month>06</month><volume>50</volume><issue>8</issue><fpage>1368</fpage><lpage>1380</lpage><pub-id 
pub-id-type="doi">10.1017/S0033291719001314</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Cohen</surname><given-names>S</given-names> </name><name name-style="western"><surname>Kamarck</surname><given-names>T</given-names> </name><name name-style="western"><surname>Mermelstein</surname><given-names>R</given-names> </name></person-group><source>Perceived Stress Scale Measuring Stress: A Guide for Health and Social Scientists</source><year>1994</year><volume>10</volume><fpage>1</fpage><lpage>2</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://www.jstor.org/stable/2136404">https://www.jstor.org/stable/2136404</ext-link></comment><pub-id pub-id-type="other">0022-1465</pub-id></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lee</surname><given-names>EH</given-names> </name></person-group><article-title>Review of the psychometric evidence of the perceived stress scale</article-title><source>Asian Nurs Res (Korean Soc Nurs Sci)</source><year>2012</year><month>12</month><volume>6</volume><issue>4</issue><fpage>121</fpage><lpage>127</lpage><pub-id pub-id-type="doi">10.1016/j.anr.2012.08.004</pub-id><pub-id pub-id-type="medline">25031113</pub-id></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McMurrich</surname><given-names>SL</given-names> </name><name name-style="western"><surname>Johnson</surname><given-names>SL</given-names> </name></person-group><article-title>Dispositional rumination in individuals with a depression history</article-title><source>Cognit Ther Res</source><year>2008</year><volume>32</volume><issue>4</issue><fpage>542</fpage><lpage>553</lpage><pub-id 
pub-id-type="doi">10.1007/s10608-006-9093-y</pub-id><pub-id pub-id-type="medline">20126425</pub-id></nlm-citation></ref><ref id="ref53"><label>53</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhou</surname><given-names>L</given-names> </name><name name-style="western"><surname>Bao</surname><given-names>J</given-names> </name><name name-style="western"><surname>Setiawan</surname><given-names>IMA</given-names> </name><name name-style="western"><surname>Saptono</surname><given-names>A</given-names> </name><name name-style="western"><surname>Parmanto</surname><given-names>B</given-names> </name></person-group><article-title>The mHealth App Usability Questionnaire (MAUQ): development and validation study</article-title><source>JMIR Mhealth Uhealth</source><year>2019</year><month>04</month><day>11</day><volume>7</volume><issue>4</issue><fpage>e11500</fpage><pub-id pub-id-type="doi">10.2196/11500</pub-id><pub-id pub-id-type="medline">30973342</pub-id></nlm-citation></ref><ref id="ref54"><label>54</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Cohen</surname><given-names>BH</given-names> </name><name name-style="western"><surname>Lea</surname><given-names>RB</given-names> </name></person-group><source>Essentials of Statistics for the Social and Behavioral Sciences</source><year>2004</year><publisher-name>John Wiley &#x0026; Sons</publisher-name><pub-id pub-id-type="other">978-0-471-22031-2</pub-id></nlm-citation></ref><ref id="ref55"><label>55</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Wilcox</surname><given-names>R</given-names> </name></person-group><source>Modern Statistics for the Social and Behavioral Sciences: A Practical Introduction</source><year>2017</year><publisher-name>Boca Raton, Chapman and Hall/CRC</publisher-name><pub-id 
pub-id-type="doi">10.1201/9781315154480</pub-id></nlm-citation></ref><ref id="ref56"><label>56</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Babu</surname><given-names>SV</given-names> </name><name name-style="western"><surname>Grechkin</surname><given-names>TY</given-names> </name><name name-style="western"><surname>Chihak</surname><given-names>B</given-names> </name><etal/></person-group><article-title>An immersive virtual peer for studying social influences on child cyclists&#x2019; road-crossing behavior</article-title><source>IEEE Trans Visual Comput Graphics</source><year>2010</year><volume>17</volume><issue>1</issue><fpage>14</fpage><lpage>25</lpage><pub-id pub-id-type="doi">10.1109/TVCG.2009.211</pub-id></nlm-citation></ref><ref id="ref57"><label>57</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Volante</surname><given-names>M</given-names> </name><name name-style="western"><surname>Babu</surname><given-names>SV</given-names> </name><name name-style="western"><surname>Chaturvedi</surname><given-names>H</given-names> </name><etal/></person-group><article-title>Effects of virtual human appearance fidelity on emotion contagion in affective inter-personal simulations</article-title><source>IEEE Trans Vis Comput Graph</source><year>2016</year><month>04</month><volume>22</volume><issue>4</issue><fpage>1326</fpage><lpage>1335</lpage><pub-id pub-id-type="doi">10.1109/TVCG.2016.2518158</pub-id><pub-id pub-id-type="medline">26780808</pub-id></nlm-citation></ref><ref id="ref58"><label>58</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bhargava</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bertrand</surname><given-names>JW</given-names> </name><name 
name-style="western"><surname>Gramopadhye</surname><given-names>AK</given-names> </name><name name-style="western"><surname>Madathil</surname><given-names>KC</given-names> </name><name name-style="western"><surname>Babu</surname><given-names>SV</given-names> </name></person-group><article-title>Evaluating multiple levels of an interaction fidelity continuum on performance and learning in near-field training simulations</article-title><source>IEEE Trans Vis Comput Graph</source><year>2018</year><volume>24</volume><issue>4</issue><fpage>1418</fpage><lpage>1427</lpage><pub-id pub-id-type="doi">10.1109/TVCG.2017.2657238</pub-id></nlm-citation></ref><ref id="ref59"><label>59</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Braun</surname><given-names>V</given-names> </name><name name-style="western"><surname>Clarke</surname><given-names>V</given-names> </name></person-group><article-title>Using thematic analysis in psychology</article-title><source>Qual Res Psychol</source><year>2006</year><month>01</month><volume>3</volume><issue>2</issue><fpage>77</fpage><lpage>101</lpage><pub-id pub-id-type="doi">10.1191/1478088706qp063oa</pub-id></nlm-citation></ref><ref id="ref60"><label>60</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Karhiy</surname><given-names>M</given-names> </name><name name-style="western"><surname>Sagar</surname><given-names>M</given-names> </name><name name-style="western"><surname>Antoni</surname><given-names>M</given-names> </name><name name-style="western"><surname>Loveys</surname><given-names>K</given-names> </name><name name-style="western"><surname>Broadbent</surname><given-names>E</given-names> </name></person-group><article-title>Can A virtual human increase mindfulness and reduce stress? 
A randomised trial</article-title><source>Computers in Human Behavior: Artificial Humans</source><year>2024</year><month>01</month><volume>2</volume><issue>1</issue><fpage>100069</fpage><pub-id pub-id-type="doi">10.1016/j.chbah.2024.100069</pub-id></nlm-citation></ref><ref id="ref61"><label>61</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Loveys</surname><given-names>K</given-names> </name><name name-style="western"><surname>Sagar</surname><given-names>M</given-names> </name><name name-style="western"><surname>Pickering</surname><given-names>I</given-names> </name><name name-style="western"><surname>Broadbent</surname><given-names>E</given-names> </name></person-group><article-title>A digital human for delivering a remote loneliness and stress intervention to at-risk younger and older adults during the COVID-19 pandemic: randomized pilot trial</article-title><source>JMIR Ment Health</source><year>2021</year><month>11</month><day>8</day><volume>8</volume><issue>11</issue><fpage>e31586</fpage><pub-id pub-id-type="doi">10.2196/31586</pub-id><pub-id pub-id-type="medline">34596572</pub-id></nlm-citation></ref><ref id="ref62"><label>62</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Loveys</surname><given-names>K</given-names> </name><name name-style="western"><surname>Antoni</surname><given-names>M</given-names> </name><name name-style="western"><surname>Donkin</surname><given-names>L</given-names> </name><name name-style="western"><surname>Sagar</surname><given-names>M</given-names> </name><name name-style="western"><surname>Broadbent</surname><given-names>E</given-names> </name></person-group><article-title>Comparing the feasibility and acceptability of a virtual human, teletherapy, and an e-manual in delivering a stress management intervention to distressed adult women: pilot study</article-title><source>JMIR Form 
Res</source><year>2023</year><month>02</month><day>9</day><volume>7</volume><fpage>e42390</fpage><pub-id pub-id-type="doi">10.2196/42390</pub-id><pub-id pub-id-type="medline">36757790</pub-id></nlm-citation></ref><ref id="ref63"><label>63</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Kruse</surname><given-names>L</given-names> </name><name name-style="western"><surname>Hertel</surname><given-names>J</given-names> </name><name name-style="western"><surname>Mostajeran</surname><given-names>F</given-names> </name><name name-style="western"><surname>Schmidt</surname><given-names>S</given-names> </name><name name-style="western"><surname>Steinicke</surname><given-names>F</given-names> </name></person-group><article-title>Would you go to a virtual doctor? a systematic literature review on user preferences for embodied virtual agents in healthcare</article-title><source>2023 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)</source><fpage>672</fpage><lpage>682</lpage><pub-id pub-id-type="doi">10.1109/ISMAR59233.2023.00082</pub-id></nlm-citation></ref><ref id="ref64"><label>64</label><nlm-citation citation-type="book"><source>Diagnostic and Statistical Manual of Mental Disorders</source><year>2013</year><edition>5</edition><publisher-name>American Psychiatric Association</publisher-name><pub-id pub-id-type="doi">10.1176/appi.books.9780890425596</pub-id></nlm-citation></ref><ref id="ref65"><label>65</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Karkosz</surname><given-names>S</given-names> </name><name name-style="western"><surname>Szyma&#x0144;ski</surname><given-names>R</given-names> </name><name name-style="western"><surname>Sanna</surname><given-names>K</given-names> </name><name name-style="western"><surname>Micha&#x0142;owski</surname><given-names>J</given-names> 
</name></person-group><article-title>Effectiveness of a web-based and mobile therapy chatbot on anxiety and depressive symptoms in subclinical young adults: randomized controlled trial</article-title><source>JMIR Form Res</source><year>2024</year><month>03</month><day>20</day><volume>8</volume><fpage>e47960</fpage><pub-id pub-id-type="doi">10.2196/47960</pub-id><pub-id pub-id-type="medline">38506892</pub-id></nlm-citation></ref><ref id="ref66"><label>66</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Burton</surname><given-names>C</given-names> </name><name name-style="western"><surname>Szentagotai Tatar</surname><given-names>A</given-names> </name><name name-style="western"><surname>McKinstry</surname><given-names>B</given-names> </name><etal/></person-group><article-title>Pilot randomised controlled trial of Help4Mood, an embodied virtual agent-based system to support treatment of depression</article-title><source>J Telemed Telecare</source><year>2016</year><month>09</month><volume>22</volume><issue>6</issue><fpage>348</fpage><lpage>355</lpage><pub-id pub-id-type="doi">10.1177/1357633X15609793</pub-id><pub-id pub-id-type="medline">26453910</pub-id></nlm-citation></ref><ref id="ref67"><label>67</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pinto</surname><given-names>MD</given-names> </name><name name-style="western"><surname>Greenblatt</surname><given-names>AM</given-names> </name><name name-style="western"><surname>Hickman</surname><given-names>RL</given-names> </name><name name-style="western"><surname>Rice</surname><given-names>HM</given-names> </name><name name-style="western"><surname>Thomas</surname><given-names>TL</given-names> </name><name name-style="western"><surname>Clochesy</surname><given-names>JM</given-names> </name></person-group><article-title>Assessing the critical parameters of eSMART-MH: a 
promising avatar-based digital therapeutic intervention to reduce depressive symptoms</article-title><source>Perspect Psychiatr Care</source><year>2016</year><month>07</month><volume>52</volume><issue>3</issue><fpage>157</fpage><lpage>168</lpage><pub-id pub-id-type="doi">10.1111/ppc.12112</pub-id><pub-id pub-id-type="medline">25800698</pub-id></nlm-citation></ref><ref id="ref68"><label>68</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pinto</surname><given-names>MD</given-names> </name><name name-style="western"><surname>Hickman</surname><given-names>RL</given-names> </name><name name-style="western"><surname>Clochesy</surname><given-names>J</given-names> </name><name name-style="western"><surname>Buchner</surname><given-names>M</given-names> </name></person-group><article-title>Avatar-based depression self-management technology: promising approach to improve depressive symptoms among young adults</article-title><source>Appl Nurs Res</source><year>2013</year><month>02</month><volume>26</volume><issue>1</issue><fpage>45</fpage><lpage>48</lpage><pub-id pub-id-type="doi">10.1016/j.apnr.2012.08.003</pub-id><pub-id pub-id-type="medline">23265918</pub-id></nlm-citation></ref><ref id="ref69"><label>69</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bickmore</surname><given-names>T</given-names> </name><name name-style="western"><surname>Gruber</surname><given-names>A</given-names> </name></person-group><article-title>Relational agents in clinical psychiatry</article-title><source>Harv Rev Psychiatry</source><year>2010</year><volume>18</volume><issue>2</issue><fpage>119</fpage><lpage>130</lpage><pub-id pub-id-type="doi">10.3109/10673221003707538</pub-id><pub-id pub-id-type="medline">20235777</pub-id></nlm-citation></ref><ref id="ref70"><label>70</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>de Vries</surname><given-names>M</given-names> </name><name name-style="western"><surname>Holland</surname><given-names>RW</given-names> </name><name name-style="western"><surname>Chenier</surname><given-names>T</given-names> </name><name name-style="western"><surname>Starr</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Winkielman</surname><given-names>P</given-names> </name></person-group><article-title>Happiness cools the warm glow of familiarity: psychophysiological evidence that mood modulates the familiarity-affect link</article-title><source>Psychol Sci</source><year>2010</year><month>03</month><volume>21</volume><issue>3</issue><fpage>321</fpage><lpage>328</lpage><pub-id pub-id-type="doi">10.1177/0956797609359878</pub-id><pub-id pub-id-type="medline">20424063</pub-id></nlm-citation></ref><ref id="ref71"><label>71</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Christov-Moore</surname><given-names>L</given-names> </name><name name-style="western"><surname>Simpson</surname><given-names>EA</given-names> </name><name name-style="western"><surname>Coud&#x00E9;</surname><given-names>G</given-names> </name><name name-style="western"><surname>Grigaityte</surname><given-names>K</given-names> </name><name name-style="western"><surname>Iacoboni</surname><given-names>M</given-names> </name><name name-style="western"><surname>Ferrari</surname><given-names>PF</given-names> </name></person-group><article-title>Empathy: gender effects in brain and behavior</article-title><source>Neurosci Biobehav Rev</source><year>2014</year><month>10</month><volume>46 Pt 4</volume><issue>Pt 4</issue><fpage>604</fpage><lpage>627</lpage><pub-id pub-id-type="doi">10.1016/j.neubiorev.2014.09.001</pub-id><pub-id pub-id-type="medline">25236781</pub-id></nlm-citation></ref><ref id="ref72"><label>72</label><nlm-citation 
citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Eagly</surname><given-names>AH</given-names> </name><name name-style="western"><surname>Woo</surname><given-names>W</given-names> </name><name name-style="western"><surname>Diekman</surname><given-names>AB</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Eckes</surname><given-names>T</given-names> </name><name name-style="western"><surname>Trautner</surname><given-names>HM</given-names> </name></person-group><article-title>Social role theory of sex differences and similiarities: A current appraisal</article-title><source>The Developmental Social Psychology of Gender</source><year>2012</year><publisher-name>Psychology Press</publisher-name><fpage>123</fpage><lpage>174</lpage><pub-id pub-id-type="doi">10.4324/9781410605245</pub-id><pub-id pub-id-type="other">e9781410605245</pub-id></nlm-citation></ref><ref id="ref73"><label>73</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ellemers</surname><given-names>N</given-names> </name></person-group><article-title>Gender stereotypes</article-title><source>Annu Rev Psychol</source><year>2018</year><month>01</month><day>4</day><volume>69</volume><issue>1</issue><fpage>275</fpage><lpage>298</lpage><pub-id pub-id-type="doi">10.1146/annurev-psych-122216-011719</pub-id><pub-id pub-id-type="medline">28961059</pub-id></nlm-citation></ref><ref id="ref74"><label>74</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Eisenberg</surname><given-names>N</given-names> </name><name name-style="western"><surname>Lennon</surname><given-names>R</given-names> </name></person-group><article-title>Sex differences in empathy and related capacities</article-title><source>Psychol 
Bull</source><year>1983</year><volume>94</volume><issue>1</issue><fpage>100</fpage><lpage>131</lpage><pub-id pub-id-type="doi">10.1037/0033-2909.94.1.100</pub-id></nlm-citation></ref><ref id="ref75"><label>75</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bhati</surname><given-names>KS</given-names> </name></person-group><article-title>Effect of client-therapist gender match on the therapeutic relationship: an exploratory analysis</article-title><source>Psychol Rep</source><year>2014</year><month>10</month><volume>115</volume><issue>2</issue><fpage>565</fpage><lpage>583</lpage><pub-id pub-id-type="doi">10.2466/21.02.PR0.115c23z1</pub-id><pub-id pub-id-type="medline">25243363</pub-id></nlm-citation></ref><ref id="ref76"><label>76</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Guadagno</surname><given-names>RE</given-names> </name><name name-style="western"><surname>Swinth</surname><given-names>KR</given-names> </name><name name-style="western"><surname>Blascovich</surname><given-names>J</given-names> </name></person-group><article-title>Social evaluations of embodied agents and avatars</article-title><source>Comput Human Behav</source><year>2011</year><month>11</month><volume>27</volume><issue>6</issue><fpage>2380</fpage><lpage>2385</lpage><pub-id pub-id-type="doi">10.1016/j.chb.2011.07.017</pub-id></nlm-citation></ref><ref id="ref77"><label>77</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kim</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Baylor</surname><given-names>AL</given-names> </name><name name-style="western"><surname>Shen</surname><given-names>E</given-names> </name></person-group><article-title>Pedagogical agents as learning companions: the impact of agent emotion and 
gender</article-title><source>Computer Assisted Learning</source><year>2007</year><month>06</month><volume>23</volume><issue>3</issue><fpage>220</fpage><lpage>234</lpage><pub-id pub-id-type="doi">10.1111/j.1365-2729.2006.00210.x</pub-id></nlm-citation></ref><ref id="ref78"><label>78</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Lee</surname><given-names>EJ</given-names> </name><name name-style="western"><surname>Nass</surname><given-names>C</given-names> </name><name name-style="western"><surname>Brave</surname><given-names>S</given-names> </name></person-group><article-title>Can computer-generated speech have gender? an experimental test of gender stereotype</article-title><source>CHI &#x2019;00 Extended Abstracts on Human Factors in Computing Systems</source><year>2000</year><publisher-name>Association for Computing Machinery</publisher-name><fpage>289</fpage><lpage>290</lpage><pub-id pub-id-type="doi">10.1145/633292.633461</pub-id></nlm-citation></ref><ref id="ref79"><label>79</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Feij&#x00F3;o-Garc&#x00ED;a</surname><given-names>PG</given-names> </name><name name-style="western"><surname>Wrenn</surname><given-names>C</given-names> </name><name name-style="western"><surname>Kalogeras</surname><given-names>S</given-names> </name><name name-style="western"><surname>Payne</surname><given-names>C</given-names> </name><name name-style="western"><surname>Lok</surname><given-names>B</given-names> </name><name name-style="western"><surname>Omojokun</surname><given-names>O</given-names> </name></person-group><article-title>Effects of gender synchrony in user-agent interactions: integrating the designer as a product cue in virtual human design for mental health support</article-title><source>Proceedings of the 12th International Conference on Human-Agent 
Interaction</source><year>2024</year><publisher-name>Association for Computing Machinery</publisher-name><fpage>123</fpage><lpage>131</lpage><pub-id pub-id-type="doi">10.1145/3687272.3688326</pub-id><pub-id pub-id-type="other">9798400711787</pub-id></nlm-citation></ref><ref id="ref80"><label>80</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Abdulrahman</surname><given-names>A</given-names> </name><name name-style="western"><surname>Richards</surname><given-names>D</given-names> </name></person-group><article-title>Is natural necessary? Human voice versus synthetic voice for intelligent virtual agents</article-title><source>MTI</source><year>2022</year><volume>6</volume><issue>7</issue><fpage>51</fpage><pub-id pub-id-type="doi">10.3390/mti6070051</pub-id></nlm-citation></ref><ref id="ref81"><label>81</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Foukarakis</surname><given-names>M</given-names> </name><name name-style="western"><surname>Karuzaki</surname><given-names>E</given-names> </name><name name-style="western"><surname>Adami</surname><given-names>I</given-names> </name><etal/></person-group><article-title>Quality assessment of virtual human assistants for elder users</article-title><source>Electronics (Basel)</source><year>2022</year><volume>11</volume><issue>19</issue><fpage>3069</fpage><pub-id pub-id-type="doi">10.3390/electronics11193069</pub-id></nlm-citation></ref><ref id="ref82"><label>82</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hopman</surname><given-names>K</given-names> </name><name name-style="western"><surname>Richards</surname><given-names>D</given-names> </name><name name-style="western"><surname>Norberg</surname><given-names>MM</given-names> </name></person-group><article-title>A digital coach to promote emotion 
regulation skills</article-title><source>MTI</source><year>2023</year><volume>7</volume><issue>6</issue><fpage>57</fpage><pub-id pub-id-type="doi">10.3390/mti7060057</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Study power analysis details.</p><media xlink:href="mental_v12i1e67381_app1.docx" xlink:title="DOCX File, 74 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Detailed description of the AirHeart app development, virtual agent features, and cognitive behavioral therapy modules.</p><media xlink:href="mental_v12i1e67381_app2.docx" xlink:title="DOCX File, 199 KB"/></supplementary-material><supplementary-material id="app3"><label>Multimedia Appendix 3</label><p>Mixed analysis of variance results for change in depressive symptoms.</p><media xlink:href="mental_v12i1e67381_app3.docx" xlink:title="DOCX File, 18 KB"/></supplementary-material><supplementary-material id="app4"><label>Multimedia Appendix 4</label><p>Mixed analysis of variance results for change in stress.</p><media xlink:href="mental_v12i1e67381_app4.docx" xlink:title="DOCX File, 19 KB"/></supplementary-material><supplementary-material id="app5"><label>Multimedia Appendix 5</label><p>Mixed analysis of variance results for change in rumination symptoms.</p><media xlink:href="mental_v12i1e67381_app5.docx" xlink:title="DOCX File, 16 KB"/></supplementary-material><supplementary-material id="app6"><label>Checklist 1</label><p>CONSORT checklist.</p><media xlink:href="mental_v12i1e67381_app6.pdf" xlink:title="PDF File, 1156 KB"/></supplementary-material></app-group></back></article>