<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMH</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Ment Health</journal-id>
      <journal-title>JMIR Mental Health</journal-title>
      <issn pub-type="epub">2368-7959</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v11i1e53203</article-id>
      <article-id pub-id-type="pmid">38889401</article-id>
      <article-id pub-id-type="doi">10.2196/53203</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Viewpoint</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Viewpoint</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>The Machine Speaks: Conversational AI and the Importance of Effort to Relationships of Meaning</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Torous</surname>
            <given-names>John</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Marshall</surname>
            <given-names>Robert</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Zhao</surname>
            <given-names>Xinyan</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Hartford</surname>
            <given-names>Anna</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Neuroscience Institute</institution>
            <institution>University of Cape Town</institution>
            <addr-line>Groote Schuur Hospital, Observatory</addr-line>
            <addr-line>Cape Town, 7935</addr-line>
            <country>South Africa</country>
            <phone>27 214042174</phone>
            <email>annahartford@gmail.com</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-6993-3800</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Stein</surname>
            <given-names>Dan J</given-names>
          </name>
          <degrees>PhD, DPhil</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-7218-7810</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Neuroscience Institute</institution>
        <institution>University of Cape Town</institution>
        <addr-line>Cape Town</addr-line>
        <country>South Africa</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>South African Medical Research Council Unit on Risk and Resilience in Mental Disorders</institution>
        <institution>Department of Psychiatry</institution>
        <institution>University of Cape Town</institution>
        <addr-line>Cape Town</addr-line>
        <country>South Africa</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Anna Hartford <email>annahartford@gmail.com</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <year>2024</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>18</day>
        <month>6</month>
        <year>2024</year>
      </pub-date>
      <volume>11</volume>
      <elocation-id>e53203</elocation-id>
      <history>
        <date date-type="received">
          <day>29</day>
          <month>9</month>
          <year>2023</year>
        </date>
        <date date-type="rev-request">
          <day>11</day>
          <month>12</month>
          <year>2023</year>
        </date>
        <date date-type="rev-recd">
          <day>25</day>
          <month>1</month>
          <year>2024</year>
        </date>
        <date date-type="accepted">
          <day>26</day>
          <month>1</month>
          <year>2024</year>
        </date>
      </history>
      <copyright-statement>©Anna Hartford, Dan J Stein. Originally published in JMIR Mental Health (https://mental.jmir.org), 18.06.2024.</copyright-statement>
      <copyright-year>2024</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Mental Health, is properly cited. The complete bibliographic information, a link to the original publication on https://mental.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://mental.jmir.org/2024/1/e53203" xlink:type="simple"/>
      <abstract>
        <p>The focus of debates about conversational artificial intelligence (CAI) has largely been on social and ethical concerns that arise when we speak to machines—what is gained and what is lost when we replace our human interlocutors, including our human therapists, with AI. In this viewpoint, we focus instead on a distinct and growing phenomenon: letting machines speak for us. What is at stake when we replace our own efforts at interpersonal engagement with CAI? The purpose of these technologies is, in part, to remove effort, but effort has enormous value, and in some cases, even intrinsic value. This is true in many realms, but especially in interpersonal relationships. To make an effort for someone, irrespective of what that effort amounts to, often conveys value and meaning in itself. We elaborate on the meaning, worth, and significance that may be lost when we relinquish effort in our interpersonal engagements as well as on the opportunities for self-understanding and growth that we may forsake.</p>
      </abstract>
      <kwd-group>
        <kwd>artificial intelligence</kwd>
        <kwd>AI</kwd>
        <kwd>conversational AIs</kwd>
        <kwd>generative AI</kwd>
        <kwd>intimacy</kwd>
        <kwd>human-machine interaction</kwd>
        <kwd>interpersonal relationships</kwd>
        <kwd>effort</kwd>
        <kwd>psychotherapy</kwd>
        <kwd>conversation</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Conversation is central to our shared humanity. It is the means through which we make ourselves knowable to another and come to know them in turn. Our mental states—our beliefs, feelings, intentions, desires, and attitudes—are in some respects unreachable by another and sometimes even opaque to ourselves. However, in conversation, we render them articulable, and therefore, accessible. Not unrelatedly, in these exchanges, we often learn about ourselves as well as the other person. The recent emergence of powerful conversational artificial intelligences (CAIs) has therefore been unsettling on various levels (far more so than equally powerful AIs that operate in mediums besides conversation). In their extraordinary replication of the means through which we express our mental states, it is tempting to impute these states to our AI interlocutors. After all, the articulation of thinking (or feeling, hoping, willing, and desiring) is usually all the evidence we require to attribute the relevant mental states to someone.</p>
      <p>In her book, Reclaiming Conversation: The Power of Talk in a Digital Age, Sherry Turkle [<xref ref-type="bibr" rid="ref1">1</xref>] endeavors to make the case for conversation in a world that has increasingly abandoned it for the conveniences (and safeties) of mere digital connection. “At a first, we speak through machines and forget how essential face-to-face conversation is to our relationships, our creativity, and our capacity for empathy,” Turkle writes. “At a second, we take a further step and speak not just through machines but to machines. This is the turning point” [<xref ref-type="bibr" rid="ref1">1</xref>]. This concern was prescient, and Turkle has more recently elaborated on it with reference to the proliferation of CAIs or social chatbots, such as Xiaoice, Woebot, or Replika. These CAIs aim to provide intimacy, but of what sort? Turkle suggests that this intimacy is necessarily fraudulent since it is (by design) devoid of the emotional vulnerability crucial to genuine intimacy [<xref ref-type="bibr" rid="ref2">2</xref>]. Similarly, these CAIs eliminate the demands and challenges of empathy required for genuine interpersonal exchanges [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref3">3</xref>]. These arguments align with Turkle’s long-standing critique of how computers affect our relationships with ourselves and with others [<xref ref-type="bibr" rid="ref3">3</xref>-<xref ref-type="bibr" rid="ref5">5</xref>].</p>
    </sec>
    <sec>
      <title>“Speaking to Machines”: CAIs and the Possibility of Insight</title>
      <p>There is ongoing debate concerning the type and quality of conversations possible with CAIs and their appropriateness in therapeutic contexts. In psychotherapy, the digitization of many processes may suggest that CAIs can simply replace the therapist. However, it is also possible to argue that the psychotherapeutic relationship and the experience of that relationship are what is most crucial. In psychodynamic psychotherapy, the client experiences transference while the therapist experiences counter-transference, and working through these processes leads to therapeutic change. In frameworks more influenced by cognitive-behavioral principles, such as schema therapy, the therapist may play a key role in providing “reparenting,” a process that leads to positive outcomes.</p>
      <p>Ethical concerns with CAIs in therapeutic contexts include the biases and other harmful prompts that might arise in such exchanges, along with the potential dearth of responsibility and accountability for these harms [<xref ref-type="bibr" rid="ref6">6</xref>-<xref ref-type="bibr" rid="ref10">10</xref>]. However, even if such patent ethical concerns were addressed or eradicated, central questions would persist: what sort of presence or entity do we have in CAIs? [<xref ref-type="bibr" rid="ref11">11</xref>-<xref ref-type="bibr" rid="ref14">14</xref>] and perhaps relatedly, is it possible for CAIs to facilitate genuine self-knowledge, self-understanding, and insight in their human interlocutors?</p>
      <p>Some have suggested that engagements with CAIs are necessarily deficient in this crucial respect, especially if we consider the practice of joint attention, as well as other forms of mutual recognition and acknowledgment, to be central to the therapeutic conversation (and indeed to conversation more generally) [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref14">14</xref>-<xref ref-type="bibr" rid="ref16">16</xref>]. Relatedly, there are concerns about the lost mutuality of these exchanges [<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref17">17</xref>]. Conversations with bots do not demand that we empathize with or accommodate another, since, in an important sense, there is no one else there [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref18">18</xref>]. As Andrew McStay [<xref ref-type="bibr" rid="ref18">18</xref>] points out, much depends on the account of empathy we are assuming. McStay argues that accounts that are more accommodative of CAIs are “deficient and potentially dangerous” insofar as they lack interdependence, copresence, and particularly moral responsibility [<xref ref-type="bibr" rid="ref18">18</xref>].</p>
      <p>However, others disagree with these characterizations and see no reason why CAIs cannot encourage genuine introspection [<xref ref-type="bibr" rid="ref19">19</xref>-<xref ref-type="bibr" rid="ref21">21</xref>]. What is required in the therapeutic exchange—according to some of these proponents—is not necessarily mutual agency, but rather the experience of being emotionally supported and encouraged to engage in self-reflection [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>]. To necessitate another subjectivity, or the presence of another full-fledged agent, is to presuppose the illegitimacy of CAIs in these contexts, and to needlessly curtail the possibilities of what qualifies as a genuine therapeutic conversation. After all, human therapists regularly fail to generate the conditions for self-understanding and insight, irrespective of their full-fledged agency [<xref ref-type="bibr" rid="ref19">19</xref>].</p>
      <p>We find these counterarguments compelling. Furthermore, if therapeutic benefits are possible through CAIs—as some research suggests [<xref ref-type="bibr" rid="ref22">22</xref>-<xref ref-type="bibr" rid="ref24">24</xref>] (although far more investigation is required [<xref ref-type="bibr" rid="ref25">25</xref>])—then we potentially have a powerful tool in therapeutic CAIs. Given the immense shortfall in mental health care globally [<xref ref-type="bibr" rid="ref26">26</xref>,<xref ref-type="bibr" rid="ref27">27</xref>] and the often prohibitive cost of undertaking conventional psychotherapy, we would be remiss to hastily disregard the beneficial possibilities of therapeutic CAIs. Moreover, certain individuals and populations might experience unique benefits from the format of engagement required by therapeutic exchanges with CAIs, and (relatedly) may not experience the particular advantages of in-person conversation highlighted by advocates such as Turkle (this point has been made with regard to children on the autism spectrum in particular [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>]).</p>
    </sec>
    <sec>
      <title>Being Spoken for: CAIs and Surrendering Articulation</title>
      <p>In recent reckonings with the rise of CAIs, the focus has generally been on concerns like those outlined above: what becomes of us when we increasingly replace our human interlocutors—including our human therapists—with AIs, that is (in Turkle’s phrase) “when we speak not just through machines but to machines” [<xref ref-type="bibr" rid="ref1">1</xref>].</p>
      <p>Our central concern in this viewpoint, however, is different. Although certain dimensions of the preceding debate are of relevance to our position, we can also remain agnostic with regard to the value of “speaking to machines,” whether in a therapeutic context or otherwise. We can remain open to the possibility that bot and human engagements can generate genuine depth, worth, and meaning. Furthermore, we need not presume that the conditions for the emergence of genuine self-understanding and self-reflection cannot be generated in interactions with CAIs. Rather, our concern arises independently, for we now seem to have reached another turning point, and one that extends even further. “At a third point,” we might add to Turkle’s list, “we take yet another step and let machines speak for us” [<xref ref-type="bibr" rid="ref1">1</xref>].</p>
      <p>We will concentrate on the significance of these forces to our self-knowledge and our interpersonal relationships, although more could be, and has been, said about their implications more generally, for example, concerning achievement gaps (where automation threatens to undermine genuine achievement, and therefore, meaningful work [<xref ref-type="bibr" rid="ref30">30</xref>]) and responsibility gaps (where automation threatens to undermine responsibility for harmful outcomes [<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>]).</p>
      <p>Our central concern will be the following: what is potentially at stake, personally and interpersonally, when we let the machine speak for us? We will explore this question within the framework of philosophical and ethical debates concerning the interpersonal value of effort, rather than exploring it qualitatively or quantitatively (although further empirical research on these questions would be valuable).</p>
      <p>This third transition can take many forms, some seemingly more trivial than others. When we are writing an email and the remainder of the sentence auto-fills in gray, we are tempted to stop speaking for ourselves and let the machine speak for us instead.</p>
      <p>At times, the costs of this surrender may seem slight, if they exist at all. What does it matter if you articulate some rote phrase to a distant work acquaintance or have it articulated for you instead? However, in other circumstances and other relationships, even these subtle interventions can carry weight.</p>
      <p>In an early exploration of the implications of large language models—written in 2019, before the mass rollout of ChatGPT and other large language models—the journalist John Seabrook [<xref ref-type="bibr" rid="ref33">33</xref>] wrote the following about the experience of using Smart Compose to autocomplete his emails:</p>
      <disp-quote>
        <p>Finally, I crossed my Rubicon. The sentence itself was a pedestrian affair. Typing an e-mail to my son, I began “I am p—” and was about to write “pleased” when predictive text suggested “proud of you.” I am proud of you. Wow, I don’t say that enough. And clearly Smart Compose thinks that’s what most fathers in my state say to their sons in e-mails. I hit Tab. No biggie.</p>
      </disp-quote>
      <disp-quote>
        <p>And yet, sitting there at the keyboard, I could feel the uncanny valley prickling my neck.</p>
      </disp-quote>
      <p>Nowadays, the modes in which the machine can speak for us have expanded enormously from these first modest iterations. There are many examples to consider, and many more are developing as we write, but the ways through which we can outsource the labor of our interpersonal articulations are currently expanding exponentially.</p>
      <p>Take one example: it is now possible to get CAIs to message on your behalf on dating apps. A variety of start-ups have generated different tools that allow you to hand over your messages to an AI [<xref ref-type="bibr" rid="ref34">34</xref>]. Instead of having to initiate a conversation with a prospective date—or to come up with thoughtful or witty replies to their messages—AI will do it for you.</p>
      <p>When you do not care for the people you are messaging, this option offers a certain pragmatic appeal (especially given the volume of messaging that contemporary dating apps necessitate). However, when you <italic>do</italic> care about a person, the temptation might be even stronger. The CAI, after all, always has an idea of what to say next, and moreover, it offers a version of what you <italic>should</italic> say—a statistically probable representation of what <italic>people like you</italic> say at <italic>times like this</italic>. In comparison, speaking for yourself can feel risky. The things you might say on your own—the way in which you try to make yourself known and get to know others—might be odd, off-putting, or <italic>wrong</italic> somehow.</p>
      <p>Take another example: in June 2023, <italic>The New York Times</italic> reported that some doctors were turning to AI to communicate compassionately with patients [<xref ref-type="bibr" rid="ref35">35</xref>]. We have all experienced the sense of inadequacy that comes with trying to say something supportive to someone who is in an awful circumstance. At such times, we can cast around for ages and summon nothing but cliches. How alluring it is to have a ready-made response instead, and one so well trained in the performance of genuine feeling. The AI’s messages will be, in many cases, much better than what we could have produced on our own—kinder, more thoughtful, and more encouraging. Yet no matter how superbly it manages to express care and compassion, this expression is of course divorced from any genuine experience of care and compassion. We should be cautious, in our expedient outsourcing of this emotional connection and engagement, of when we begin to divorce ourselves from the genuine experience of care and compassion along with it.</p>
      <p>When we are struggling to find the right thing to say, it may feel like we are achieving nothing. Yet it is precisely in these times—as we try to understand what someone else is enduring, to feel for them, and to express that feeling—that we are undertaking the genuine experience of care and compassion, without which the words themselves are hollow.</p>
      <p>One optimistic response is that we might learn more empathetic engagement from the example of the machines. However, this seems unlikely. It is like suggesting that we will improve our spelling skills by relying on automated spell-check or that we will remember more phone numbers through the excellent example set by our phones. Of course, we will not, as the process removes effort, and little of importance has ever been learned without effort.</p>
      <p>Thinking ahead—and not necessarily too far ahead—it is possible to see how the temptation to let the machine speak might overspill our text-based conversations. The push to normalize mixed-reality engagements—most notably with the launch of Apple’s Vision Pro headset last year—would make it possible for the machine to take over not only our text-based correspondence but also our face-to-face conversations.</p>
      <p>We are, right now, at the initial stages of the temptation to begin ceding our expressions to CAIs. However, with little imagination, it is easy to see all the ways in which these temptations are poised to grow. After all, if it was largely the machine whose messages charmed someone into going on a date with you in the first place, how enticing would it be to let the machine keep on speaking when you have to go on the date yourself? The machine speaks with such authority, and as our confidence in its utterances grows, our confidence in our own could correspondingly diminish.</p>
      <p>To our mind, the potential costs (to one’s own humanity and to our shared humanity) of CAIs are greatest when we allow them to speak for us. Genuine conversation nurtures authentic engagement with others and a better understanding of ourselves. Turkle [<xref ref-type="bibr" rid="ref1">1</xref>] emphasizes what is lost when we speak through machines, and further still, when we speak <italic>to</italic> machines, but there is, even in these latter engagements, the possibility of coming to know our own thoughts and feelings, of having to search for, and to find, the expression for our experience, and recognizing that the experience precedes the expression that follows.</p>
      <p>However, when we allow the machine to speak for us, even this possibility diminishes. We can too easily avoid the effort it takes to genuinely understand ourselves and our unique circumstances (undertakings that are not necessarily discouraged by speaking <italic>to</italic> machines [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref29">29</xref>]). We are not encouraged to find the expression for our experience. Instead, we can too easily mistake whichever expressions we receive for our own experience, scarcely recognizing what we have lost in the exchange.</p>
    </sec>
    <sec>
      <title>Effort and Meaning</title>
      <p>The purpose of these technologies is, in no small part, to remove effort. To take something that once required a great deal from us and make it require little to nothing. Effort is by definition a burden, and in any given instance of having to exert effort, we are always wishing there was a way to be rid of it, but effort also has enormous value, and in some cases, even intrinsic value. This can be true in many realms—there are crucial senses in which “achievement” itself is impossible without effort [<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]—but it is especially true in our interpersonal relationships. In some interpretations, effort allows us to <italic>reveal</italic> our care and concern for one another and make it knowable. In such interpretations, its role is primarily epistemic. This epistemic role is not trivial in itself, but there are also interpretations whereby effort is more significant still—instead of only allowing us to <italic>reveal</italic> care and concern, it may also <italic>generate</italic> this care and concern [<xref ref-type="bibr" rid="ref37">37</xref>,<xref ref-type="bibr" rid="ref38">38</xref>]. Imagine a husband who lovingly cares for his wife through a long illness. His devotion through this ordeal might not only reveal the depths of his love for his wife, but it could also <italic>generate</italic> those depths.</p>
      <p>In this sense, the exertion of effort might have both generative and revelatory value in our interpersonal relationships, and the relinquishment of effort might have serious costs on both fronts. To make an effort for someone, irrespective of what that effort amounts to, conveys value and meaning in itself. Many of our interpersonal practices are ways of trying to make real or manifest the effort that is in fact of genuine importance to us. In turn, when effort is removed from these practices, so is their worth.</p>
      <p>Take one example: nowadays, Facebook provides automatic reminders of people’s birthdays. The moment this memory became automated, the fact of remembering someone’s birthday (which used to carry weight and significance) became increasingly meaningless. It is now possible to set up your account to automatically post a rote birthday message on the appropriate day; you need not even give the person a moment’s thought. These automated messages are equivalent, in terms of their interpersonal worth, to the automated birthday messages sent by a bank or a mobile service provider. Without requiring any thought or effort, the whole practice loses its significance. What other forms of interaction could we surrender to this fate, as we are increasingly able to opt for the effortless modes of expressing pride, love, affection, or consolation to the people around us?</p>
    </sec>
    <sec>
      <title>Conclusions</title>
      <p>In turn, we should begin to think carefully (even if just for ourselves) about which of these technologies we choose to use, in different contexts and spheres of our lives, and which ones we do not. Where we choose to use them, we should think equally hard about the <italic>manner</italic> of our engagement and the extent of our agency within it; the more passive we allow ourselves to be, the greater the potential costs we have gestured to in this viewpoint. This is especially true when it comes to those undertakings that have value in and of themselves—rather than value only for their outputs [<xref ref-type="bibr" rid="ref30">30</xref>]—and also, as we have emphasized in this viewpoint, when it comes to those relationships and human interactions in which our engaged presence, as well as our emotional and intellectual attention and reflection, carries so much significance.</p>
      <p>There is an adage in developmental psychology: the toys that are best for children are the ones that require them to do the most work. “The best toys are 90% the kid, 10% the toy,” as psychologist Kathy Hirsh-Pasek put it. “If it’s 90% the toy, and 10% the kid, that’s a problem” [<xref ref-type="bibr" rid="ref39">39</xref>]. The toys that demand the most of a child are the ones that generate creativity, teach them problem-solving, and encourage their social interactions. On the other hand, the toys that merely require a child to press a button will teach them only to press a button over and over again. When we consider children, we usually show special caution for what will aid and hamper their development, flourishing, and well-being. However, our development does not cease after childhood, and indeed, much of the hardest work (in learning to know as well as relate to ourselves and others) still lies ahead. Given that, we should perhaps pause and wonder what opportunities for self-development we might be forsaking as we embrace, ever more, the toys that want to do everything for us, while we ourselves do less and less. We should remember, in the ceaseless war against effort, that far from needing to be eradicated at every opportunity, there are spheres of our lives—and our interpersonal relationships are a prime example—where effort itself can be the whole point.</p>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">CAI</term>
          <def>
            <p>conversational artificial intelligence</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <fn-group>
      <fn fn-type="conflict">
        <p>DJS has received consultancy honoraria from Discovery Vitality, Johnson &#38; Johnson, Kanna, L’Oreal, Lundbeck, Orion, Sanofi, Servier, Takeda and Vistagen.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Turkle</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <source>Reclaiming Conversation: The Power of Talk in a Digital Age</source>
          <year>2015</year>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>Penguin Books</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Turkle</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>That chatbot I’ve loved to hate</article-title>
          <source>MIT Technology Review</source>
          <year>2020</year>
          <access-date>2024-05-27</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.technologyreview.com/2020/08/18/1006096/that-chatbot-ive-loved-to-hate/">https://www.technologyreview.com/2020/08/18/1006096/that-chatbot-ive-loved-to-hate/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Turkle</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <source>The Empathy Diaries: A Memoir</source>
          <year>2021</year>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>Penguin Books</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Turkle</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <source>Life on the Screen: Identity in the Age of the Internet</source>
          <year>1995</year>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>Simon &#38; Schuster</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Turkle</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <source>Alone Together: Why We Expect More From Technology and Less From Each Other</source>
          <year>2011</year>
          <publisher-loc>New York, NY</publisher-loc>
          <publisher-name>Basic Books</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Laacke</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Bias and epistemic injustice in conversational AI</article-title>
          <source>Am J Bioeth</source>
          <year>2023</year>
          <month>05</month>
          <volume>23</volume>
          <issue>5</issue>
          <fpage>46</fpage>
          <lpage>48</lpage>
          <pub-id pub-id-type="doi">10.1080/15265161.2023.2191055</pub-id>
          <pub-id pub-id-type="medline">37130400</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kasirzadeh</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gabriel</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>In conversation with artificial intelligence: aligning language models with human values</article-title>
          <source>Philos Technol</source>
          <year>2023</year>
          <month>04</month>
          <day>19</day>
          <volume>36</volume>
          <fpage>1</fpage>
          <lpage>24</lpage>
          <pub-id pub-id-type="doi">10.1007/s13347-023-00606-x</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Blodgett</surname>
              <given-names>SL</given-names>
            </name>
            <name name-style="western">
              <surname>Lopez</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Olteanu</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Sim</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Wallach</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Stereotyping Norwegian salmon: an inventory of pitfalls in fairness benchmark datasets</article-title>
          <year>2021</year>
          <conf-name>Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)</conf-name>
          <conf-date>August 1 - 6</conf-date>
          <conf-loc>Virtual event</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Henderson</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Sinha</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Angelard-Gontier</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Ke</surname>
              <given-names>NR</given-names>
            </name>
            <name name-style="western">
              <surname>Fried</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Lowe</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Ethical challenges in data-driven dialogue systems</article-title>
          <year>2018</year>
          <conf-name>AIES '18: AAAI/ACM Conference on AI, Ethics, and Society</conf-name>
          <conf-date>February 2 - 3</conf-date>
          <conf-loc>New Orleans, LA</conf-loc>
          <pub-id pub-id-type="doi">10.1145/3278721.3278777</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Welbl</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Glaese</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Uesato</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Dathathri</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Mellor</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Hendricks</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Anderson</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Kohli</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Coppin</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Challenges in detoxifying language models</article-title>
          <source>arXiv</source>
          <comment>Preprint posted online on Sep 15, 2021</comment>
          <pub-id pub-id-type="doi">10.48550/arXiv.2109.07445</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sedlakova</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Trachsel</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Conversational artificial intelligence in psychotherapy: a new therapeutic tool or agent?</article-title>
          <source>Am J Bioeth</source>
          <year>2023</year>
          <month>05</month>
          <volume>23</volume>
          <issue>5</issue>
          <fpage>4</fpage>
          <lpage>13</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.5167/uzh-218039"/>
          </comment>
          <pub-id pub-id-type="doi">10.1080/15265161.2022.2048739</pub-id>
          <pub-id pub-id-type="medline">35362368</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nyholm</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Tools and/or agents? Reflections on Sedlakova and Trachsel's discussion of conversational artificial intelligence</article-title>
          <source>Am J Bioeth</source>
          <year>2023</year>
          <month>05</month>
          <volume>23</volume>
          <issue>5</issue>
          <fpage>17</fpage>
          <lpage>19</lpage>
          <pub-id pub-id-type="doi">10.1080/15265161.2023.2191053</pub-id>
          <pub-id pub-id-type="medline">37130387</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Floridi</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>AI as agency without intelligence: on ChatGPT, large language models, and other generative models</article-title>
          <source>Philos Technol</source>
          <year>2023</year>
          <month>03</month>
          <day>10</day>
          <volume>36</volume>
          <issue>1</issue>
          <fpage>1</fpage>
          <lpage>7</lpage>
          <pub-id pub-id-type="doi">10.1007/s13347-023-00621-y</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Montemayor</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Language and intelligence</article-title>
          <source>Mind Mach</source>
          <year>2021</year>
          <month>08</month>
          <day>08</day>
          <volume>31</volume>
          <issue>4</issue>
          <fpage>471</fpage>
          <lpage>486</lpage>
          <pub-id pub-id-type="doi">10.1007/s11023-021-09568-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Strijbos</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Jongepier</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Self-knowledge in psychotherapy: adopting a dual perspective on one’s own mental states</article-title>
          <source>PPP</source>
          <year>2018</year>
          <volume>25</volume>
          <issue>1</issue>
          <fpage>45</fpage>
          <lpage>58</lpage>
          <pub-id pub-id-type="doi">10.1353/ppp.2018.0008</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wieland</surname>
              <given-names>LC</given-names>
            </name>
          </person-group>
          <article-title>Relational reciprocity from conversational artificial intelligence in psychotherapy</article-title>
          <source>Am J Bioeth</source>
          <year>2023</year>
          <month>05</month>
          <volume>23</volume>
          <issue>5</issue>
          <fpage>35</fpage>
          <lpage>37</lpage>
          <pub-id pub-id-type="doi">10.1080/15265161.2023.2191033</pub-id>
          <pub-id pub-id-type="medline">37130399</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Friedman</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Ethical concerns with replacing human relations with humanoid robots: an ubuntu perspective</article-title>
          <source>AI Ethics</source>
          <year>2022</year>
          <month>06</month>
          <day>20</day>
          <volume>3</volume>
          <issue>2</issue>
          <fpage>527</fpage>
          <lpage>538</lpage>
          <pub-id pub-id-type="doi">10.1007/s43681-022-00186-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>McStay</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Replika in the Metaverse: the moral problem with empathy in 'It from Bit'</article-title>
          <source>AI Ethics</source>
          <year>2022</year>
          <month>12</month>
          <day>22</day>
          <fpage>1</fpage>
          <lpage>13</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/36573214"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s43681-022-00252-7</pub-id>
          <pub-id pub-id-type="medline">36573214</pub-id>
          <pub-id pub-id-type="pii">252</pub-id>
          <pub-id pub-id-type="pmcid">PMC9773645</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Grodniewicz</surname>
              <given-names>JP</given-names>
            </name>
            <name name-style="western">
              <surname>Hohol</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Therapeutic conversational artificial intelligence and the acquisition of self-understanding</article-title>
          <source>Am J Bioeth</source>
          <year>2023</year>
          <month>05</month>
          <volume>23</volume>
          <issue>5</issue>
          <fpage>59</fpage>
          <lpage>61</lpage>
          <pub-id pub-id-type="doi">10.1080/15265161.2023.2191021</pub-id>
          <pub-id pub-id-type="medline">37130405</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hurley</surname>
              <given-names>ME</given-names>
            </name>
            <name name-style="western">
              <surname>Lang</surname>
              <given-names>BH</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>JN</given-names>
            </name>
          </person-group>
          <article-title>Therapeutic artificial intelligence: does agential status matter?</article-title>
          <source>Am J Bioeth</source>
          <year>2023</year>
          <month>05</month>
          <volume>23</volume>
          <issue>5</issue>
          <fpage>33</fpage>
          <lpage>35</lpage>
          <pub-id pub-id-type="doi">10.1080/15265161.2023.2191037</pub-id>
          <pub-id pub-id-type="medline">37130404</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gray</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Deception mode: how conversational AI can respect patient autonomy</article-title>
          <source>Am J Bioeth</source>
          <year>2023</year>
          <month>05</month>
          <volume>23</volume>
          <issue>5</issue>
          <fpage>55</fpage>
          <lpage>57</lpage>
          <pub-id pub-id-type="doi">10.1080/15265161.2023.2191023</pub-id>
          <pub-id pub-id-type="medline">37130415</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Burr</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Floridi</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <source>Ethics of Digital Well-Being: A Multidisciplinary Approach</source>
          <year>2020</year>
          <publisher-loc>Cham, Switzerland</publisher-loc>
          <publisher-name>Springer</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rubeis</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>E-mental health applications for depression: an evidence-based ethical analysis</article-title>
          <source>Eur Arch Psychiatry Clin Neurosci</source>
          <year>2021</year>
          <month>04</month>
          <volume>271</volume>
          <issue>3</issue>
          <fpage>549</fpage>
          <lpage>555</lpage>
          <pub-id pub-id-type="doi">10.1007/s00406-019-01093-y</pub-id>
          <pub-id pub-id-type="medline">31894391</pub-id>
          <pub-id pub-id-type="pii">10.1007/s00406-019-01093-y</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fiske</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Henningsen</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Buyx</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>The implications of embodied artificial intelligence in mental healthcare for digital wellbeing</article-title>
          <source>Ethics of Digital Well-Being</source>
          <year>2020</year>
          <publisher-loc>Cham, Switzerland</publisher-loc>
          <publisher-name>Springer</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Torous</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Cerrato</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Halamka</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Targeting depressive symptoms with technology</article-title>
          <source>mHealth</source>
          <year>2019</year>
          <volume>5</volume>
          <fpage>19</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/31463305"/>
          </comment>
          <pub-id pub-id-type="doi">10.21037/mhealth.2019.06.04</pub-id>
          <pub-id pub-id-type="medline">31463305</pub-id>
          <pub-id pub-id-type="pii">mh-05-2019.06.04</pub-id>
          <pub-id pub-id-type="pmcid">PMC6691087</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Amram</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Klempner</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Shturman</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Greenbaum</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Therapists or replicants? ethical, legal, and social considerations for using ChatGPT in therapy</article-title>
          <source>Am J Bioeth</source>
          <year>2023</year>
          <month>05</month>
          <volume>23</volume>
          <issue>5</issue>
          <fpage>40</fpage>
          <lpage>42</lpage>
          <pub-id pub-id-type="doi">10.1080/15265161.2023.2191022</pub-id>
          <pub-id pub-id-type="medline">37130418</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <collab>Mental Health and Substance Use (MSD)</collab>
            <collab>World Health Organization</collab>
          </person-group>
          <source>The Mental Health Atlas 2020</source>
          <year>2021</year>
          <month>10</month>
          <day>8</day>
          <publisher-loc>Geneva</publisher-loc>
          <publisher-name>World Health Organization</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tartaro</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Cassell</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Playing with virtual peers: bootstrapping contingent discourse in children with autism</article-title>
          <year>2008</year>
          <conf-name>Proceedings of the Eighth International Conference for the Learning Sciences – ICLS 2008, Volumes 2 (pp. 382-389)</conf-name>
          <conf-date>June 24 - 28</conf-date>
          <conf-loc>Utrecht, The Netherlands</conf-loc>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tigard</surname>
              <given-names>DW</given-names>
            </name>
          </person-group>
          <article-title>Toward relational diversity for AI in psychotherapy</article-title>
          <source>Am J Bioeth</source>
          <year>2023</year>
          <month>05</month>
          <volume>23</volume>
          <issue>5</issue>
          <fpage>64</fpage>
          <lpage>66</lpage>
          <pub-id pub-id-type="doi">10.1080/15265161.2023.2191047</pub-id>
          <pub-id pub-id-type="medline">37130412</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Danaher</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Nyholm</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Automation, work and the achievement gap</article-title>
          <source>AI Ethics</source>
          <year>2020</year>
          <month>11</month>
          <day>23</day>
          <volume>1</volume>
          <issue>3</issue>
          <fpage>227</fpage>
          <lpage>237</lpage>
          <pub-id pub-id-type="doi">10.1007/s43681-020-00028-x</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Matthias</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>The responsibility gap: ascribing responsibility for the actions of learning automata</article-title>
          <source>Ethics Inf Technol</source>
          <year>2004</year>
          <month>09</month>
          <volume>6</volume>
          <issue>3</issue>
          <fpage>175</fpage>
          <lpage>183</lpage>
          <pub-id pub-id-type="doi">10.1007/s10676-004-3422-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nyholm</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Attributing agency to automated systems: reflections on human-robot collaborations and responsibility-Loci</article-title>
          <source>Sci Eng Ethics</source>
          <year>2018</year>
          <month>08</month>
          <volume>24</volume>
          <issue>4</issue>
          <fpage>1201</fpage>
          <lpage>1219</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/28721641"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s11948-017-9943-x</pub-id>
          <pub-id pub-id-type="medline">28721641</pub-id>
          <pub-id pub-id-type="pii">10.1007/s11948-017-9943-x</pub-id>
          <pub-id pub-id-type="pmcid">PMC6097047</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Seabrook</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>The next word</article-title>
          <source>The New Yorker</source>
          <year>2019</year>
          <access-date>2024-05-27</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.newyorker.com/magazine/2019/10/14/can-a-machine-learn-to-write-for-the-new-yorker">https://www.newyorker.com/magazine/2019/10/14/can-a-machine-learn-to-write-for-the-new-yorker</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lorenz</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Welcome to the age of automated dating: multiple start-ups are building AI tools for romantic connections</article-title>
          <source>Washington Post</source>
          <year>2023</year>
          <access-date>2024-05-27</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.washingtonpost.com/technology/2023/04/23/dating-ai-automated-online/">https://www.washingtonpost.com/technology/2023/04/23/dating-ai-automated-online/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kolata</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>When doctors use a chatbot to improve their bedside manner</article-title>
          <source>The New York Times</source>
          <year>2023</year>
          <access-date>2024-05-27</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://tinyurl.com/3p6aasyv">https://tinyurl.com/3p6aasyv</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bradford</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <source>Achievement</source>
          <year>2015</year>
          <publisher-loc>Oxford, UK</publisher-loc>
          <publisher-name>Oxford University Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Guerrero</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Robichaud</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Wieland</surname>
              <given-names>JW</given-names>
            </name>
          </person-group>
          <article-title>Intellectual difficulty and moral responsibility</article-title>
          <source>Responsibility: The Epistemic Condition</source>
          <year>2017</year>
          <publisher-loc>Oxford, UK</publisher-loc>
          <publisher-name>Oxford University Press</publisher-name>
          <fpage>199</fpage>
          <lpage>218</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nelkin</surname>
              <given-names>DK</given-names>
            </name>
          </person-group>
          <article-title>Difficulty and degrees of moral praiseworthiness and blameworthiness</article-title>
          <source>Nous</source>
          <year>2014</year>
          <month>11</month>
          <day>14</day>
          <volume>50</volume>
          <issue>2</issue>
          <fpage>356</fpage>
          <lpage>378</lpage>
          <pub-id pub-id-type="doi">10.1111/nous.12079</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Blasdel</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>'They want toys to get their children into Harvard': have we been getting playthings all wrong?</article-title>
          <source>The Guardian</source>
          <year>2022</year>
          <access-date>2024-05-27</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://tinyurl.com/3d274nnk">https://tinyurl.com/3d274nnk</ext-link>
          </comment>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
