<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMH</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Ment Health</journal-id>
      <journal-title>JMIR Mental Health</journal-title>
      <issn pub-type="epub">2368-7959</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v9i2e31724</article-id>
      <article-id pub-id-type="pmid">35147507</article-id>
      <article-id pub-id-type="doi">10.2196/31724</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>In Search of State and Trait Emotion Markers in Mobile-Sensed Language: Field Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Torous</surname>
            <given-names>John</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Tang</surname>
            <given-names>Sunny</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Toki</surname>
            <given-names>Eugenia</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Carlier</surname>
            <given-names>Chiara</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Department of Psychology and Educational Sciences</institution>
            <institution>Katholieke Universiteit Leuven</institution>
            <addr-line>Tiensestraat 102</addr-line>
            <addr-line>Leuven, 3000</addr-line>
            <country>Belgium</country>
            <phone>32 16 37 44 85</phone>
            <email>chiara.carlier@student.kuleuven.be</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-4985-6240</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Niemeijer</surname>
            <given-names>Koen</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0816-534X</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Mestdagh</surname>
            <given-names>Merijn</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-5077-861X</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Bauwens</surname>
            <given-names>Michael</given-names>
          </name>
          <degrees>MSc</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-6340-7516</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Vanbrabant</surname>
            <given-names>Peter</given-names>
          </name>
          <degrees>MA</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-0217-3303</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Geurts</surname>
            <given-names>Luc</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-9608-9147</ext-link>
        </contrib>
        <contrib id="contrib7" contrib-type="author">
          <name name-style="western">
            <surname>van Waterschoot</surname>
            <given-names>Toon</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-6323-7350</ext-link>
        </contrib>
        <contrib id="contrib8" contrib-type="author">
          <name name-style="western">
            <surname>Kuppens</surname>
            <given-names>Peter</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-2363-2356</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Department of Psychology and Educational Sciences</institution>
        <institution>Katholieke Universiteit Leuven</institution>
        <addr-line>Leuven</addr-line>
        <country>Belgium</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Department of Smart Organisations</institution>
        <institution>University College Leuven-Limburg</institution>
        <addr-line>Heverlee</addr-line>
        <country>Belgium</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Department of Computer Science</institution>
        <institution>Katholieke Universiteit Leuven</institution>
        <addr-line>Leuven</addr-line>
        <country>Belgium</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Department of Electrical Engineering</institution>
        <institution>Katholieke Universiteit Leuven</institution>
        <addr-line>Leuven</addr-line>
        <country>Belgium</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Chiara Carlier <email>chiara.carlier@student.kuleuven.be</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <month>2</month>
        <year>2022</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>11</day>
        <month>2</month>
        <year>2022</year>
      </pub-date>
      <volume>9</volume>
      <issue>2</issue>
      <elocation-id>e31724</elocation-id>
      <history>
        <date date-type="received">
          <day>3</day>
          <month>7</month>
          <year>2021</year>
        </date>
        <date date-type="rev-request">
          <day>14</day>
          <month>8</month>
          <year>2021</year>
        </date>
        <date date-type="rev-recd">
          <day>21</day>
          <month>9</month>
          <year>2021</year>
        </date>
        <date date-type="accepted">
          <day>8</day>
          <month>10</month>
          <year>2021</year>
        </date>
      </history>
      <copyright-statement>©Chiara Carlier, Koen Niemeijer, Merijn Mestdagh, Michael Bauwens, Peter Vanbrabant, Luc Geurts, Toon van Waterschoot, Peter Kuppens. Originally published in JMIR Mental Health (https://mental.jmir.org), 11.02.2022.</copyright-statement>
      <copyright-year>2022</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Mental Health, is properly cited. The complete bibliographic information, a link to the original publication on https://mental.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://mental.jmir.org/2022/2/e31724" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Emotions and mood are important for overall well-being. Therefore, the search for continuous, effortless emotion prediction methods is an important field of study. Mobile sensing provides a promising tool and can capture one of the most telling signs of emotion: language.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>The aim of this study is to examine the separate and combined predictive value of mobile-sensed language data sources for detecting both momentary emotional experience as well as global individual differences in emotional traits and depression.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>In a 2-week experience sampling method study, we collected self-reported emotion ratings and voice recordings 10 times a day, continuous keyboard activity, and trait depression severity. We correlated state and trait emotions and depression and language, distinguishing between speech content (spoken words), speech form (voice acoustics), writing content (written words), and writing form (typing dynamics). We also investigated how well these features predicted state and trait emotions using cross-validation to select features and a hold-out set for validation.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>Overall, the reported emotions and mobile-sensed language demonstrated weak correlations. The most significant correlations were found between speech content and state emotions and between speech form and state emotions, ranging up to 0.25. Speech content provided the best predictions for state emotions. None of the trait emotion–language correlations remained significant after correction. Among the emotions studied, valence and happiness displayed the most significant correlations and the highest predictive performance.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>Although using mobile-sensed language as an emotion marker shows some promise, correlations and predictive <italic>R</italic><sup>2</sup> values are low.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>depression</kwd>
        <kwd>emotions</kwd>
        <kwd>mobile sensing</kwd>
        <kwd>language</kwd>
        <kwd>LIWC</kwd>
        <kwd>openSMILE</kwd>
        <kwd>speech</kwd>
        <kwd>writing</kwd>
        <kwd>mobile phone</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Background</title>
        <p>Emotions are crucial to human survival, functioning, and well-being. They alert us to opportunities and challenges in our environment and motivate us to act on them to serve our goals and concerns [<xref ref-type="bibr" rid="ref1">1</xref>]. As such, how people feel throughout their daily lives is an important determinant of their overall mental well-being [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref3">3</xref>]. On average, feeling higher levels of positive emotions and lower levels of negative emotions is generally considered to reflect better well-being, and mood disorders involve extreme instantiations of this [<xref ref-type="bibr" rid="ref4">4</xref>]. Aside from average levels, emotions are in constant movement and fluctuation over time [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref5">5</xref>]. Small but repeated deviations in moment-to-moment emotion dynamics can accumulate over time into larger deviances in mood and, ultimately, episodes of mood disorders. Therefore, reliable and suitable methods to measure people’s daily life emotions, in terms of both momentary fluctuations and average levels, are much needed to further improve the study of emotion and emotion disorder and help in the detection and prevention of maladaptive emotional functioning. One of the ways in which people convey emotions is language. In this paper, we will examine to what extent language-based data collected through mobile sensing can be instrumental in the prediction of emotions.</p>
      </sec>
      <sec>
        <title>Experience Sampling Method</title>
        <p>The current gold standard for researching emotion (dynamics) in daily life is the experience sampling method (ESM). Participants complete a short survey on how they feel multiple times a day, allowing data to be collected during their normal routine [<xref ref-type="bibr" rid="ref6">6</xref>]. The momentary nature of the assessment helps mitigate memory biases, enhances ecological validity, and allows for within-person patterning and investigating of relationships [<xref ref-type="bibr" rid="ref7">7</xref>-<xref ref-type="bibr" rid="ref9">9</xref>].</p>
        <p>However valuable, the ESM has some drawbacks. Interrupting daily activities for a survey multiple times a day can be burdensome [<xref ref-type="bibr" rid="ref10">10</xref>]. Motivation loss may induce untruthful or superficial responses, compromising data quality [<xref ref-type="bibr" rid="ref11">11</xref>]. Furthermore, thinking about emotions multiple times a day may influence their natural flow [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref12">12</xref>], and social desirability in self-reports is a known problem [<xref ref-type="bibr" rid="ref9">9</xref>]. These drawbacks could be avoided if it were possible to collect equally informative data without having to rely on the participants’ active involvement.</p>
      </sec>
      <sec>
        <title>Mobile Sensing and Language</title>
        <p>One such unobtrusive (passive) data collection method as an alternative to ESM is mobile sensing [<xref ref-type="bibr" rid="ref13">13</xref>]. Whenever we use or carry our mobile devices, mobile sensors and user logs such as light sensors, accelerometers, and app use logs are registered as traces of our digital behavior [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref15">15</xref>]. Given the pervasiveness of smartphones, this continuous flow of information might enable the automatic and unobtrusive detection of behavioral features such as sleep, social behavior, or even mood disorder episodes to aid in research and clinical practice [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref16">16</xref>-<xref ref-type="bibr" rid="ref19">19</xref>].</p>
        <p>We need emotionally valid data that can be captured by a smartphone to be able to use mobile sensing in the detection of emotion and mood disorders. Language is one of the ways in which people (digitally) express their emotions [<xref ref-type="bibr" rid="ref20">20</xref>]. Both language and emotions also serve as communication and cooperation tools and mutually influence each other [<xref ref-type="bibr" rid="ref21">21</xref>]. People explicitly or implicitly convey emotions to their interaction partners through what they say and how they say it [<xref ref-type="bibr" rid="ref22">22</xref>-<xref ref-type="bibr" rid="ref26">26</xref>]. Therefore, in this paper, we will examine to what extent language-based data collected through mobile sensing can be instrumental for the prediction of momentary and trait emotion. We make a distinction, on the one hand, between what people communicate (content) and how they communicate it (form) and, on the other hand, between speech and writing, resulting in 4 types of language data (<xref ref-type="boxed-text" rid="box1">Textbox 1</xref>).</p>
        <boxed-text id="box1" position="float">
          <title>Types of language data.</title>
          <p>
            <bold>Types of language data</bold>
          </p>
          <list list-type="bullet">
            <list-item>
              <p>Speech content: spoken words</p>
            </list-item>
            <list-item>
              <p>Speech form: voice acoustics (eg, pitch and timbre)</p>
            </list-item>
            <list-item>
              <p>Writing content: written words</p>
            </list-item>
            <list-item>
              <p>Writing form: typing dynamics (eg, typing speed and key press duration)</p>
            </list-item>
          </list>
        </boxed-text>
      </sec>
      <sec>
        <title>Previous Related Work</title>
        <sec>
          <title>Speech Content</title>
          <p>Studies on speech and emotional word use have generally focused on positive or negative emotions. Induced positive emotions coincide with more positive and less negative emotion words between persons [<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>]. In addition, in natural language snippets, a positive association between trait positive affectivity and positive emotion words was found [<xref ref-type="bibr" rid="ref29">29</xref>]. Higher trait negative affectivity and higher within-person negative emotions coincided with more negative emotion words and more sadness-related words in experimental and natural settings [<xref ref-type="bibr" rid="ref27">27</xref>-<xref ref-type="bibr" rid="ref29">29</xref>]. However, a recent study did not find any significant correlations between emotion words and self-reported emotions either within or between persons [<xref ref-type="bibr" rid="ref30">30</xref>].</p>
          <p>Because of these inconsistencies, <italic>The Secret Life of Pronouns</italic> supports the use of nonemotion words to assess emotional tone. In particular, depression and negative emotionality show a small correlation with first-person pronouns [<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>]. A larger variety of studies was conducted with writing, which will be further addressed in the <italic>Writing Content</italic> section.</p>
        </sec>
        <sec>
          <title>Speech Form</title>
          <p>Each voice has a unique sound because of age, gender, and accent. However, psychological features such as attitudes, intentions, and emotions also affect our sound [<xref ref-type="bibr" rid="ref26">26</xref>]. Johnstone and Scherer [<xref ref-type="bibr" rid="ref33">33</xref>] discern three types of features: time-related (eg, speech rate and speech duration), intensity-related (eg, speaking intensity and loudness), and features related to the fundamental frequency (F0; eg, F0 floor and F0 range). A fourth type could be timbre-related features (eg, jitter, shimmer, and formants). (Mobile-sensed) voice features have repeatedly been used in affective computing for the automatic classification of depression, bipolar disorder, and Parkinson disease [<xref ref-type="bibr" rid="ref34">34</xref>-<xref ref-type="bibr" rid="ref38">38</xref>].</p>
          <p>Higher-arousal emotions (eg, fear, anger, and joy) generally induce a higher speech intensity, F0, and speech rate, whereas lower-arousal emotions (eg, sadness and boredom) induce a lower speech intensity, F0, and speech rate (<xref ref-type="table" rid="table1">Table 1</xref>) [<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref39">39</xref>-<xref ref-type="bibr" rid="ref43">43</xref>]. Other features include a harmonics to noise ratio, which was found unrelated to arousal [<xref ref-type="bibr" rid="ref44">44</xref>], and jitter, which showed a positive correlation with depression [<xref ref-type="bibr" rid="ref45">45</xref>]. Arousal has been easiest to detect based on voice acoustics [<xref ref-type="bibr" rid="ref46">46</xref>]. Discrete emotion recognition based on these features in deep neural networks has also been successful [<xref ref-type="bibr" rid="ref47">47</xref>]. It is not yet clear whether these features could also discriminate between discrete emotions in simple models [<xref ref-type="bibr" rid="ref48">48</xref>].</p>
          <table-wrap position="float" id="table1">
            <label>Table 1</label>
            <caption>
              <p>Expected emotion–speech form correlations.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="90"/>
              <col width="90"/>
              <col width="50"/>
              <col width="60"/>
              <col width="50"/>
              <col width="50"/>
              <col width="80"/>
              <col width="80"/>
              <col width="80"/>
              <col width="60"/>
              <col width="90"/>
              <col width="70"/>
              <col width="70"/>
              <col width="80"/>
              <thead>
                <tr valign="top">
                  <td>Emotion</td>
                  <td>F0<sup>a</sup>-mean</td>
                  <td>F0-SD</td>
                  <td>F0-range</td>
                  <td>F0-rise</td>
                  <td>F0-fall</td>
                  <td>Loudness mean</td>
                  <td>Loudness rise</td>
                  <td>Loudness fall</td>
                  <td>Jitter<sup>b</sup></td>
                  <td>Shimmer<sup>c</sup></td>
                  <td>HNR<sup>d</sup></td>
                  <td>Speech rate</td>
                  <td>Pause duration</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Valence</td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                </tr>
                <tr valign="top">
                  <td>Arousal</td>
                  <td>(+)<sup>e</sup></td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>(+=)<sup>f</sup></td>
                  <td>(−)<sup>g</sup></td>
                </tr>
                <tr valign="top">
                  <td>Anger</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+=</td>
                  <td>+=</td>
                  <td>+</td>
                  <td>(+−)<sup>h</sup></td>
                  <td>−</td>
                </tr>
                <tr valign="top">
                  <td>Anxiety</td>
                  <td>+</td>
                  <td>+−</td>
                  <td>+</td>
                  <td>+−</td>
                  <td>+−</td>
                  <td>+</td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>+</td>
                  <td>+</td>
                  <td>−</td>
                  <td>+−</td>
                  <td>+−</td>
                </tr>
                <tr valign="top">
                  <td>Sadness or depression</td>
                  <td>−</td>
                  <td>−</td>
                  <td>−</td>
                  <td>−</td>
                  <td>−</td>
                  <td>−</td>
                  <td>−</td>
                  <td>−</td>
                  <td>+</td>
                  <td>
                    <break/>
                  </td>
                  <td>−</td>
                  <td>−</td>
                  <td>+</td>
                </tr>
                <tr valign="top">
                  <td>Stress</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>+</td>
                  <td>−</td>
                </tr>
                <tr valign="top">
                  <td>Happiness</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+=</td>
                  <td>+=</td>
                  <td>+</td>
                  <td>+</td>
                  <td>−</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table1fn1">
                <p><sup>a</sup>F0: fundamental frequency.</p>
              </fn>
              <fn id="table1fn2">
                <p><sup>b</sup>Deviations in individual consecutive fundamental frequency period lengths.</p>
              </fn>
              <fn id="table1fn3">
                <p><sup>c</sup>Difference in the peak amplitudes of consecutive fundamental frequency periods.</p>
              </fn>
              <fn id="table1fn4">
                <p><sup>d</sup>HNR: harmonics to noise ratio (ratio of energy in harmonic components to energy in noise components).</p>
              </fn>
              <fn id="table1fn5">
                <p><sup>e</sup>Positive correlation.</p>
              </fn>
              <fn id="table1fn6">
                <p><sup>f</sup>Positive or no correlation.</p>
              </fn>
              <fn id="table1fn7">
                <p><sup>g</sup>Negative correlation.</p>
              </fn>
              <fn id="table1fn8">
                <p><sup>h</sup>Undirected correlation.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
        </sec>
        <sec>
          <title>Writing Content</title>
          <p>Higher valence has repeatedly been associated with more positive and less negative emotion words on a within- and between-person level, along with a higher word count in both natural and induced emotion conditions (<xref ref-type="table" rid="table2">Table 2</xref>) [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref49">49</xref>-<xref ref-type="bibr" rid="ref51">51</xref>]. Other studies have demonstrated 1-time links between higher valence and more exclamation marks and fewer negations between persons and between higher valence and less sadness-related words within persons [<xref ref-type="bibr" rid="ref50">50</xref>,<xref ref-type="bibr" rid="ref51">51</xref>], although the latter 2 have also been found to be unrelated [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref49">49</xref>]. Pennebaker [<xref ref-type="bibr" rid="ref52">52</xref>] states that people use more first-person plural pronouns when they are happy.</p>
          <table-wrap position="float" id="table2">
            <label>Table 2</label>
            <caption>
              <p>Expected emotion–speech and writing content correlations.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="110"/>
              <col width="60"/>
              <col width="50"/>
              <col width="50"/>
              <col width="50"/>
              <col width="80"/>
              <col width="90"/>
              <col width="90"/>
              <col width="60"/>
              <col width="70"/>
              <col width="50"/>
              <col width="80"/>
              <col width="70"/>
              <col width="90"/>
              <thead>
                <tr valign="bottom">
                  <td>Emotion</td>
                  <td>WC<sup>a</sup></td>
                  <td>I</td>
                  <td>We</td>
                  <td>You</td>
                  <td>Negate</td>
                  <td>Posemo<sup>b</sup></td>
                  <td>Negemo<sup>c</sup></td>
                  <td>Anx<sup>d</sup></td>
                  <td>Anger</td>
                  <td>Sad</td>
                  <td>Certain<sup>e</sup></td>
                  <td>Swear</td>
                  <td>Exclam<sup>f</sup></td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Valence</td>
                  <td>(+)<sup>g</sup></td>
                  <td>(−)<sup>h</sup></td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>+</td>
                  <td>
                    <break/>
                  </td>
                  <td>+</td>
                  <td>+</td>
                </tr>
                <tr valign="top">
                  <td>Arousal</td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                </tr>
                <tr valign="top">
                  <td>Anger</td>
                  <td>
                    <break/>
                  </td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>+</td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                </tr>
                <tr valign="top">
                  <td>Anxiety</td>
                  <td>
                    <break/>
                  </td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>+</td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                </tr>
                <tr valign="top">
                  <td>Sadness</td>
                  <td>
                    <break/>
                  </td>
                  <td>+</td>
                  <td>+</td>
                  <td>
                    <break/>
                  </td>
                  <td>+</td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>+</td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                </tr>
                <tr valign="top">
                  <td>Stress</td>
                  <td>
                    <break/>
                  </td>
                  <td>+</td>
                  <td>
                    <break/>
                  </td>
                  <td>+</td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                </tr>
                <tr valign="top">
                  <td>Happiness</td>
                  <td>+</td>
                  <td>−</td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>+</td>
                  <td>
                    <break/>
                  </td>
                  <td>+</td>
                  <td>+</td>
                </tr>
                <tr valign="top">
                  <td>Depression</td>
                  <td>
                    <break/>
                  </td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>+</td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>+</td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table2fn1">
                <p><sup>a</sup>WC: word count.</p>
              </fn>
              <fn id="table2fn2">
                <p><sup>b</sup>Posemo: positive emotions.</p>
              </fn>
              <fn id="table2fn3">
                <p><sup>c</sup>Negemo: negative emotions.</p>
              </fn>
              <fn id="table2fn4">
                <p><sup>d</sup>Anx: anxiety.</p>
              </fn>
              <fn id="table2fn5">
                <p><sup>e</sup>Certain: absolutist words.</p>
              </fn>
              <fn id="table2fn6">
                <p><sup>f</sup>Exclam: exclamation marks.</p>
              </fn>
              <fn id="table2fn7">
                <p><sup>g</sup>Positive correlation.</p>
              </fn>
              <fn id="table2fn8">
                <p><sup>h</sup>Negative correlation.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
          <p>Negative emotion, anxiety, and anger words recur as linguistic markers of anger within and between persons [<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref51">51</xref>]. Pennebaker [<xref ref-type="bibr" rid="ref52">52</xref>] adds to that the use of second-person pronouns. Recurrent linguistic markers of trait anxiety include negative emotion, sadness, and anger words [<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref54">54</xref>]. The results with explicit anxiety words are mixed, and some isolated findings suggest a relationship with first-person, negation, swear, and certainty words [<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref54">54</xref>]. Momentary and trait sadness have been linked to more negative emotion, sadness, and anger words in multiple studies [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref49">49</xref>,<xref ref-type="bibr" rid="ref51">51</xref>]. In contrast, they were unrelated to sadness words in daily diaries [<xref ref-type="bibr" rid="ref51">51</xref>]. A positive correlation existed between stress on one side and negative emotion and anger words between and within persons on the other [<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref54">54</xref>]. Anxiety words have been related to stress both on a weekly and daily level [<xref ref-type="bibr" rid="ref51">51</xref>], but this could not be replicated with trait stress [<xref ref-type="bibr" rid="ref54">54</xref>]. Apart from the explicit emotion categories, several studies have linked depressive symptoms to the use of <italic>I</italic> words [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref55">55</xref>-<xref ref-type="bibr" rid="ref58">58</xref>]. 
Other correlations include more negative emotion words, more swear words, and more negations [<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref59">59</xref>,<xref ref-type="bibr" rid="ref60">60</xref>]. More anxiety, sadness, and anger words were found in 1 study but were not significant in all studies [<xref ref-type="bibr" rid="ref51">51</xref>,<xref ref-type="bibr" rid="ref54">54</xref>]. In fact, Capecelatro et al [<xref ref-type="bibr" rid="ref31">31</xref>] found depression to be unrelated to all Linguistic Inquiry and Word Count (LIWC) emotion categories.</p>
        </sec>
        <sec>
          <title>Writing Form</title>
          <p>Initially, studies concerning typing dynamics used external computer keyboards to predict stress and depression, among other emotions [<xref ref-type="bibr" rid="ref61">61</xref>-<xref ref-type="bibr" rid="ref65">65</xref>]. More recent studies have tried to use soft keyboards on smartphones for emotion, depression, and bipolar disorder detection [<xref ref-type="bibr" rid="ref66">66</xref>-<xref ref-type="bibr" rid="ref69">69</xref>]. It has been easier to distinguish between broad emotion dimensions—valence in 1 study and arousal in another [<xref ref-type="bibr" rid="ref66">66</xref>,<xref ref-type="bibr" rid="ref70">70</xref>].</p>
          <p>Despite the high predictive accuracies of deep learning models, separate correlations between emotional states and typing dynamics are small (<xref ref-type="table" rid="table3">Table 3</xref>). They exist between increased arousal and decreased keystroke duration and latency [<xref ref-type="bibr" rid="ref70">70</xref>]. The dynamics used in depression detection include a shorter key press duration and latency, with a medium reduction in duration for severe depression but a high reduction for mild depression [<xref ref-type="bibr" rid="ref61">61</xref>]. No correlation was found between depression and the number of backspaces. For emotions, typing speed was the most predictive feature [<xref ref-type="bibr" rid="ref66">66</xref>].</p>
          <table-wrap position="float" id="table3">
            <label>Table 3</label>
            <caption>
              <p>Expected emotion–writing form correlations.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="180"/>
              <col width="150"/>
              <col width="130"/>
              <col width="150"/>
              <col width="130"/>
              <col width="130"/>
              <col width="130"/>
              <thead>
                <tr valign="top">
                  <td>Emotion</td>
                  <td>Number of characters</td>
                  <td>Typing speed</td>
                  <td>Average key press duration</td>
                  <td>Number of entries</td>
                  <td>Backspaces</td>
                  <td>Typing duration</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Valence</td>
                  <td>(+)<sup>a</sup></td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                </tr>
                <tr valign="top">
                  <td>Arousal</td>
                  <td>
                    <break/>
                  </td>
                  <td>+</td>
                  <td>(−)<sup>b</sup></td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>−</td>
                </tr>
                <tr valign="top">
                  <td>Anger</td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                </tr>
                <tr valign="top">
                  <td>Anxiety</td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                </tr>
                <tr valign="top">
                  <td>Sadness</td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                </tr>
                <tr valign="top">
                  <td>Stress</td>
                  <td>
                    <break/>
                  </td>
                  <td>+</td>
                  <td>−</td>
                  <td>−</td>
                  <td>−</td>
                  <td>−</td>
                </tr>
                <tr valign="top">
                  <td>Happiness</td>
                  <td>+</td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                </tr>
                <tr valign="top">
                  <td>Depression</td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>−</td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table3fn1">
                <p><sup>a</sup>Positive correlation.</p>
              </fn>
              <fn id="table3fn2">
                <p><sup>b</sup>Negative correlation.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
        </sec>
      </sec>
      <sec>
        <title>This Study</title>
        <p>Despite this body of research, crucial questions remain. For instance, most research has focused on between-person relationships, whereas few studies have looked at state emotions within persons. Therefore, it is unclear to what extent mobile-sensed language can help predict moment-to-moment changes within individuals. Previous research has typically also examined particular language features in isolation. As a result, we do not know how the different types of language data compare in their predictive value nor to what extent combining them may enhance the prediction of moment-to-moment and trait emotions.</p>
        <p>In this study, we will examine the separate and combined predictive value of 4 mobile-sensed language data sources for detecting momentary emotional experience as well as emotional traits and depression. A 2-week ESM study was designed, querying participants to indicate their valence, arousal, anger, anxiety, sadness, stress, and happiness on their smartphones 10 times a day. In addition, a custom-built app recorded data from several sensors. Relevant to this study, the participants were asked to use the provided custom keyboard software as often as possible and to make a voice recording regarding their emotional state at the end of each ESM survey. On the basis of these data, we will examine how self-reported emotional experience is correlated and can be predicted with spoken and written word use, acoustic voice features, and typing dynamics.</p>
        <p>This study goes beyond previous work by comparing and combining all 4 sources of language behavior: speech, writing, content, and form. In addition, this study will examine the prediction of emotion traits as well as moment-to-moment emotional fluctuations in daily life, providing a comprehensive picture of the potential of language-based smartphone-sensing data for emotion detection.</p>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Participants</title>
        <p>Participants were recruited through notices on social media groups and notice boards around university buildings. In this notice, people were directed to a web survey for selection purposes. This web survey queried an email address, age, gender, and questions regarding the inclusion criteria. These entailed Dutch as mother tongue, availability for the duration of the study, ownership of an Android smartphone that supported the sensing app (not iPhone, Huawei, Wiko, Medion, or Xiaomi), always carrying that smartphone, and activating it at least 10 times a day. A total of 230 people completed the web survey, of whom 116 (50.4%) were excluded based on the aforementioned criteria. Of the remaining 114 people, 69 (60.5%) agreed to participate in the study. In the laboratory, 3% (2/69) of participants refused to sign the informed consent, and the installation of the apps failed with another 3% (2/69) of participants, leaving 65 actual participants. For the analyses, an extra inclusion criterion of having answered at least 30 surveys led to the exclusion of another 8% (5/65) of participants. Of the remaining 60 participants, 17 (28%) were men, and 43 (72%) were women (mean age 21.85 years, SD 2.31 years; range 17-32 years).</p>
        <p>The participants were reimbursed depending on their cooperation in the study. A maximum of €50 (US $56) could be earned. A total of €10 (US $11.20) was earned after completing some baseline trait questionnaires at the start of the study. Another €5 (US $5.60) could be earned per 10% completed ESM surveys, ending at 80% completed surveys. This is a standard practice in ESM research. This study was approved by the Societal Ethical Commission of Katholieke Universiteit Leuven (G-2018 01 1095).</p>
      </sec>
      <sec>
        <title>Materials</title>
        <sec>
          <title>Mobile Sensing</title>
          <p>A total of 2 apps were installed on each smartphone. The first one, a custom-built app called Actitrack, recorded data from multiple mobile sensors, such as screen locks, light sensors, and location. The software also provided a custom onscreen keyboard display that could be used instead of the default soft keyboard on the host smartphone. This way, the app could register all typing activity with the custom keyboard as it had no access to the default keyboard. Because of the sensitivity of these data, privacy measures were taken. All data were securely sent over HTTPS to a central server of Katholieke Universiteit Leuven and stored in 2 different files.</p>
          <p>This study solely focused on the sensed keyboard and voice data. The participants were asked to use the custom-made keyboard as often as possible to render enough writing data. While doing so, the following variables were stored: content of the message, number of backspaces, number of characters, typing speed, typing duration, average duration of a key press, number of positive emojis, and number of negative emojis.</p>
          <p>After each ESM survey, the participants were redirected to the sensing app to record a voice message. In the app, there was a button to start and a button to decline, and the instruction read “Make a recording of about one minute about what you have done and how it made you feel. Good luck!” This meant that keyboard activity was passively sensed the entire time of the study, whereas voice recordings were actively prompted and initiated by the participants. As the keyboard messages and voice recordings might contain sensitive personal information, the files were encrypted separately and could only be stored and handled on computers with an encrypted hard drive.</p>
        </sec>
        <sec>
          <title>ESM Approach</title>
          <p>The second app, MobileQ, delivered the ESM surveys [<xref ref-type="bibr" rid="ref71">71</xref>]. A total of 10 times a day for 2 weeks, the participants were prompted to answer some questions, including current levels of valence, arousal, anger, anxiety, sadness, stress, and happiness, using a visual analogue scale (0-100). The first notification of each day was sent randomly between 10 AM and 11 AM, including a question about sleep quality. The other 9 surveys were semirandom, dividing the time between 11 AM and 10 PM into 9 equal blocks and randomly programming a beep in each block. Other questions concerned where and with whom the participant was, what they were doing, if the app had worked without problems, and whether something positive or negative had happened since the last survey, but these questions are not analyzed in this paper.</p>
        </sec>
        <sec>
          <title>Mental Health Survey</title>
          <p>At the beginning of the study, each participant completed a mental health and personality survey. In this study, only the depression subscale of the Depression, Anxiety, and Stress Scale (DASS) was used [<xref ref-type="bibr" rid="ref72">72</xref>]. The DASS contains 21 statements, and the participants must indicate how much these applied to them on a scale of 0 to 3. The depression subscale is an average score of 7 items.</p>
        </sec>
      </sec>
      <sec>
        <title>Procedure</title>
        <p>After meeting the inclusion criteria, the participants attended a session in the laboratory. During each session, an informed consent was first proposed and signed. Next, the 2 apps were installed on the participants’ smartphones, and they received a booklet with user instructions and a unique participant number. The booklet included instructions to keep the phone turned on, charge it at night, not lend it to a friend, switch off the screen lock, and be connected to Wi-Fi as much as possible. It also included a guide on how to install and uninstall the apps. Finally, the participants were asked to complete the trait questionnaires. For each participant, the 2-week study began the day after the session, and the apps were automatically deactivated after 15 days. There was an optional feedback session at the end where the participants could receive a debriefing and help with uninstallation. The 60 participants who reached the cutoff of 30 completed surveys responded on average to 109.3 (SD 22) of the 140 notifications, yielding a compliance rate of 78% (mean compliance 0.78, SD 0.16; range 0.26-0.99).</p>
      </sec>
      <sec>
        <title>Data Preprocessing</title>
        <p>The voice samples were converted to text files to be able to analyze the words used in speech. The voice recordings were initially transcribed using the open-source transcriber software Kaldi (NVIDIA) [<xref ref-type="bibr" rid="ref73">73</xref>]; however, as the transcripts contained many language errors, all of them were corrected by hand. These text files were then used for the automated word counting. All following data processing and analyses were performed using R (version 4.0.3; R Foundation for Statistical Computing) [<xref ref-type="bibr" rid="ref74">74</xref>]. First, all voice recordings and keyboard activities were linked to their corresponding ESM surveys based on their timestamps. If the timestamps were not an exact match, voice recordings within 5 minutes of an ESM timestamp were linked to that corresponding survey. Keyboard activity was binned into intervals ranging from 30 minutes before to 30 minutes after an ESM survey by pooling all messages and summing the typing dynamics except for typing speed and average key press duration, for which the mean was taken. Second, all participants with &#60;30 responses or without a single voice recording or keyboard activity were removed. This left 51 participants with a total of 1015 voice recordings and 59 participants with a total of 3929 keyboard bins. Finally, all used measures were prepared for the momentary- and trait-level analyses. For the momentary-level analyses, all observations were standardized within participants. For the trait-level analyses, all observations of a given participant were aggregated into 1 single observation to be used in a between-person context along with the DASS score. The momentary level thus reflects emotional states from one moment to another, whereas the trait level represents the average mood of the participant over the duration of the study. Standardization happened only over the observations with an ESM survey as well as keyboard or voice recordings.</p>
      </sec>
      <sec>
        <title>Feature Extraction</title>
        <sec>
          <title>Speech Content</title>
          <p>The content of the voice recordings was analyzed using the LIWC software [<xref ref-type="bibr" rid="ref75">75</xref>]. LIWC is a language processing tool that allows for the automated counting and labeling of words. LIWC counts and categorizes words going from pronouns to swear words to religion- or death-related words. Each category is then presented as a percentage of counted words on the total number of words. In this study, the automatically generated Dutch translation of the LIWC 2015 dictionary was used [<xref ref-type="bibr" rid="ref76">76</xref>]. Twelve categories were selected based on the reviewed literature: <italic>word count</italic>, <italic>i</italic>, <italic>we</italic>, <italic>you</italic>, <italic>negate</italic>, <italic>posemo</italic>, <italic>negemo</italic>, <italic>anxiety</italic>, <italic>anger</italic>, <italic>sad</italic>, <italic>certain</italic>, and <italic>swear</italic> (<xref ref-type="table" rid="table4">Table 4</xref>).</p>
          <table-wrap position="float" id="table4">
            <label>Table 4</label>
            <caption>
              <p>Descriptive statistics of the speech data.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="30"/>
              <col width="570"/>
              <col width="400"/>
              <thead>
                <tr valign="top">
                  <td colspan="2">Item</td>
                  <td>Value, mean (SD; range)</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td colspan="3">
                    <bold>Emotions<sup>a</sup></bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Valence</td>
                  <td>56.21 (11.3; 22.57 to 83.42)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Arousal</td>
                  <td>44.7 (11.35; 18.41 to 77.21)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Anger</td>
                  <td>10.63 (9.08; 1.7 to 52.05)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Anxiety</td>
                  <td>12.47 (12.62; 1.35 to 56.31)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Sadness</td>
                  <td>13.06 (9.38; 1.84 to 43.1)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Stress</td>
                  <td>27.58 (15.15; 5.08 to 74.16)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Happiness</td>
                  <td>56.44 (11.32; 21.41 to 80.68)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Depression</td>
                  <td>0.42 (0.45; 0 to 2.14)</td>
                </tr>
                <tr valign="top">
                  <td colspan="3">
                    <bold>Speech content<sup>b</sup></bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>WC (word count)</td>
                  <td>60.72 (31.76; 4 to 125.63)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>I (first-person singular)</td>
                  <td>9.44 (3.69; 0 to 19.09)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>We (first-person plural)</td>
                  <td>0.58 (0.83; 0 to 3.7)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>You (second-person singular)</td>
                  <td>0.06 (0.11; 0 to 0.41)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Negate (negations)</td>
                  <td>1.29 (0.75; 0 to 3.28)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Posemo (positive emotion words)</td>
                  <td>3.54 (2.04; 0 to 12.5)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Negemo (negative emotion words)</td>
                  <td>0.98 (0.72; 0 to 2.73)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Anx (anxiety-related words)</td>
                  <td>0.36 (0.52; 0 to 2.38)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Anger (anger-related words)</td>
                  <td>0.27 (0.35; 0 to 1.47)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Sad (sadness-related words)</td>
                  <td>0.16 (0.18; 0 to 0.76)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Certain (absolutist words)</td>
                  <td>1.59 (1.36; 0 to 7.71)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Swear (swear words)</td>
                  <td>0 (0.03; 0 to 0.19)</td>
                </tr>
                <tr valign="top">
                  <td colspan="3">
                    <bold>Speech form<sup>c</sup></bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>F0<sup>d</sup> mean</td>
                  <td>29.93 (4.26; 20.25 to 40.63)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>F0 SD</td>
                  <td>0.22 (0.05; 0.13 to 0.42)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>F0 range</td>
                  <td>7.52 (3.63; 2.29 to 19.4)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>F0 mean rising slope</td>
                  <td>303.85 (76.4; 126.97 to 556.56)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>F0 mean falling slope</td>
                  <td>155.13 (50.45; 88.93 to 336.52)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Loudness mean</td>
                  <td>0.77 (0.37; 0.19 to 2.1)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Loudness mean rising slope</td>
                  <td>12.85 (5.01; 3.43 to 26.76)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Loudness mean falling slope</td>
                  <td>10.02 (4.08; 2.52 to 17.81)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Jitter mean</td>
                  <td>0.05 (0.01; 0.03 to 0.07)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Shimmer mean</td>
                  <td>1.29 (0.16; 1.02 to 1.75)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>HNR<sup>e</sup> mean</td>
                  <td>4.61 (2.44; −4.16 to 8.6)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Voiced segments per second (speech rate)</td>
                  <td>2.12 (0.48; 0.55 to 3.38)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Mean unvoiced segment length (pause duration)</td>
                  <td>0.29 (0.56; 0.11 to 4.16)</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table4fn1">
                <p><sup>a</sup>Emotions were rated on a visual analogue scale of 0-100, and depression was rated on a scale of 0-3.</p>
              </fn>
              <fn id="table4fn2">
                <p><sup>b</sup>Except for word count, all Linguistic Inquiry and Word Count dimensions display percentages of the total word count.</p>
              </fn>
              <fn id="table4fn3">
                <p><sup>c</sup>Fundamental frequency measures are logarithmic transformations on a semitone frequency scale starting at 27.5 Hz. Loudness measures are the perceived signal intensity. The harmonics to noise ratio displays an energy-related harmonics to noise ratio and is indicative of voice quality along with jitter and shimmer.</p>
              </fn>
              <fn id="table4fn4">
                <p><sup>d</sup>F0: fundamental frequency.</p>
              </fn>
              <fn id="table4fn5">
                <p><sup>e</sup>HNR: harmonics to noise ratio.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
        </sec>
        <sec>
          <title>Speech Form</title>
          <p>The acoustic features of the voice recordings were extracted using the openSMILE software (audEERING GmbH) [<xref ref-type="bibr" rid="ref77">77</xref>]. OpenSMILE is an open-source audio feature extraction toolkit with SMILE, which stands for speech and music interpretation by large-space extraction. The newest version, openSMILE 3.0, provides a simpler package for Python. We chose the Geneva Minimalistic Acoustic Parameter Set, which provides some basic statistics such as the mean and SD for a minor set of acoustic features [<xref ref-type="bibr" rid="ref78">78</xref>]. Thirteen parameters were selected based on the reviewed literature: F0 mean, F0 range, F0 SD, F0 mean rising slope, F0 mean falling slope, loudness mean, loudness mean rising slope, loudness mean falling slope, mean jitter, mean shimmer, mean harmonics to noise ratio, voiced segments per second, and mean unvoiced segment length (<xref ref-type="table" rid="table4">Table 4</xref>). The first 5 relate to the pitch of the voice, the next 3 concern the loudness, the next 3 define the voice quality or timbre, and the last 2 can be interpreted as speech rate and mean pause duration.</p>
        </sec>
        <sec>
          <title>Writing Content</title>
          <p>The content of the writing was analyzed in the same way as the content of the voice recordings—by using the LIWC software and the 12 chosen categories, also adding <italic>exclamation marks</italic> (<xref ref-type="table" rid="table5">Table 5</xref>).</p>
          <table-wrap position="float" id="table5">
            <label>Table 5</label>
            <caption>
              <p>Descriptive statistics of the writing data.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="30"/>
              <col width="570"/>
              <col width="0"/>
              <col width="400"/>
              <thead>
                <tr valign="top">
                  <td colspan="3">Item</td>
                  <td>Value, mean (SD; range)</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td colspan="4">
                    <bold>Emotions<sup>a</sup></bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Valence</td>
                  <td colspan="2">56.07 (10.88; 22.57-83.42)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Arousal</td>
                  <td colspan="2">44.34 (11.67; 9.27-77.21)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Anger</td>
                  <td colspan="2">10.49 (8.8; 1.5-52.05)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Anxiety</td>
                  <td colspan="2">12.14 (12.2; 0.15-56.31)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Sadness</td>
                  <td colspan="2">12.82 (9.35; 1.84-43.1)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Stress</td>
                  <td colspan="2">26.85 (15.12; 3.31-74.16)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Happiness</td>
                  <td colspan="2">56.31 (10.97; 21.41-80.68)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Depression</td>
                  <td colspan="2">0.45 (0.48; 0-2.14)</td>
                </tr>
                <tr valign="top">
                  <td colspan="4">
                    <bold>Writing content<sup>b</sup></bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Positive emojis</td>
                  <td colspan="2">1.4 (5.94; 0-45.09)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Negative emojis</td>
                  <td colspan="2">0.15 (0.26; 0-1.35)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>WC (word count)</td>
                  <td colspan="2">82.4 (58.45; 1.8-358.93)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>I (first-person singular)</td>
                  <td colspan="2">3.21 (1.22; 0-5.31)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>We (first-person plural)</td>
                  <td colspan="2">0.57 (0.34; 0-1.38)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>You (second-person singular)</td>
                  <td colspan="2">2.21 (0.82; 0.52-5)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Negate (negations)</td>
                  <td colspan="2">1.44 (0.81; 0-3.83)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Posemo (positive emotion words)</td>
                  <td colspan="2">0.1 (0.11; 0-0.38)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Negemo (negative emotion words)</td>
                  <td colspan="2">3.48 (1.57; 0-8.25)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Anx (anxiety-related words)</td>
                  <td colspan="2">0.85 (0.39; 0-1.68)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Anger (anger-related words)</td>
                  <td colspan="2">0.12 (0.12; 0-0.55)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Sad (sadness-related words)</td>
                  <td colspan="2">0.26 (0.2; 0-0.81)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Certain (absolutist words)</td>
                  <td colspan="2">0.24 (0.15; 0-0.65)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Swear (swear words)</td>
                  <td colspan="2">2.31 (1.06; 0-4.86)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Exclam (exclamation marks)</td>
                  <td colspan="2">1.56 (1.8; 0-9.26)</td>
                </tr>
                <tr valign="top">
                  <td colspan="4">
                    <bold>Writing form<sup>c</sup></bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Characters, N</td>
                  <td colspan="2">480.11 (293.67; 12.7-1764.5)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Typing speed (characters per second)</td>
                  <td colspan="2">2.1 (0.55; 1.18-4.68)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Average key press duration (ms)</td>
                  <td colspan="2">79.95 (16.48; 20.68-122.83)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Entries, N</td>
                  <td colspan="2">15.37 (10.69; 1-71.92)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Total backspaces, N</td>
                  <td colspan="2">0.17 (0.06; 0-0.3)</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Total typing duration (seconds)</td>
                  <td colspan="2">2.68 (2.18; 0.44-9.85)</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table5fn1">
                <p><sup>a</sup>Emotions were rated on a visual analogue scale of 0-100, and depression was rated on a scale of 0-3.</p>
              </fn>
              <fn id="table5fn2">
                <p><sup>b</sup>Except for word count, all Linguistic Inquiry and Word Count dimensions display percentages of the total word count.</p>
              </fn>
              <fn id="table5fn3">
                <p><sup>c</sup>Number of backspaces and typing duration are divided by the total number of keystrokes (characters + backspaces).</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
        </sec>
        <sec>
          <title>Writing Form</title>
          <p>The typing dynamics were immediately recorded during typing without any additional software. The variables extracted from the custom-made keyboard were the number of backspaces, typing duration, typing speed, number of characters, and average duration of a key press (<xref ref-type="table" rid="table5">Table 5</xref>). The absolute number of backspaces and typing duration were transformed into the relative number on the total number of keystrokes for that bin (characters + backspaces). After binning, the number of keyboard entries (eg, separate messages and notes) collected in that bin was also counted.</p>
        </sec>
      </sec>
      <sec>
        <title>Correlation Analyses</title>
        <p>After standardization, pairwise correlations were computed between the emotions on one side and the language features on the other. At the momentary level, this was done by extracting the slopes of multilevel simple linear regressions using the lme4 and lmerTest packages in R with the restricted maximum likelihood modeling. At the trait level, Spearman correlations were applied to the aggregated data set. On each correlation table, a false discovery rate (FDR) correction was applied according to the step-down method by Holm [<xref ref-type="bibr" rid="ref79">79</xref>].</p>
      </sec>
      <sec>
        <title>Predictive Modeling</title>
        <p>Next, we were interested in how well the language features would predict emotional states and traits. The total data set for voice and keyboard separately was divided into an 80% training and 20% test set. We used all the significant correlations of the previous analyses for the 4 language types separately as possible predictors for a given emotion in a linear regression model with a random intercept and varying slopes for participants at the momentary level, allowing predictors to have different values for each participant. When the correlation analysis yielded no significant correlations for an emotion, the 3 most highly correlated features were chosen as possible predictors. A 10-fold cross-validation on the training set was applied to determine which of the possible predictors had an average <italic>P</italic> value of &#60;.05, and those were kept in the model. When there were no predictors with an average <italic>P</italic> value of &#60;.05, the 2 best predictors were chosen to prevent overfitting of the training set. Finally, a model with the chosen predictors was fitted on the total training set, and then we calculated the predictive <italic>R</italic><sup>2</sup> based on that model and the test set. The predictive <italic>R</italic><sup>2</sup> is calculated as 1 minus the mean squared error divided by the variance of the data, making it scale-independent:</p>
        <graphic xlink:href="mental_v9i2e31724_fig13.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        <p>As we noticed that a different split of the test and training sets yielded different results, especially for the trait level, we chose to perform a 50-fold variation of the training and test sets in a bootstrap-like manner. This means we randomly created 50 different splits of the observations into 80% training and 20% test sets.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Descriptives</title>
        <sec>
          <title>Speech</title>
          <p>A total of 51 participants (51/60, 85%) recorded between 1 and 96 voice samples on the total number of ESM surveys they completed, with an average compliance rate of 19% (speech mean 0.19, SD 0.21; range 0.01-0.94). Within participants, there was a significant correlation between the day of the study (1 to 14) and the number of voice recordings (<italic>r</italic>=−0.36; <italic>P</italic>&#60;.001), meaning that compliance decreased during the study. For the descriptive statistics of all speech measures, we looked at the distribution of the within-person averages (<xref ref-type="table" rid="table4">Table 4</xref>). The participants showed sufficient variability in their emotions. <italic>I</italic> and <italic>posemo</italic> were the most counted words, although, in general, the LIWC dimensions only accounted for a small share of the total amount of spoken words. When looking at depression, we saw a large cluster of DASS depression scores between 0 and 0.75 and then 6 sparse points reaching &#62;0.75. The maximum of the scale was 3, which could mean that our sample lacked the sensitivity to register any significant relationships between depressive symptoms and the 4 language types.</p>
        </sec>
        <sec>
          <title>Writing</title>
          <p>A total of 59 participants (59/60, 98%) used the custom-made keyboard between 5 and 117 times in the hour around their completed ESM surveys, with an average use rate of 60% (writing mean 0.60, SD 0.21; range 0.07-0.95). Here, again, use declined throughout the study within participants (<italic>r</italic>=−0.23; <italic>P&#60;</italic>.001). Similar to the speech data, for the descriptive statistics, we looked at the distribution of the within-person averages (<xref ref-type="table" rid="table5">Table 5</xref>). Overall, this sample showed the same depression and emotion distributions as the speech sample. <italic>I</italic>, <italic>negemo,</italic> and <italic>swear</italic> were the most counted words, although, again, the LIWC dimensions in general only accounted for a small share of the total amount of written words.</p>
        </sec>
      </sec>
      <sec>
        <title>Correlation Analyses</title>
        <sec>
          <title>Speech Content</title>
          <p>After the FDR correction at the momentary level, <italic>P</italic>&#60;.001 for all significant correlations mentioned here. Higher valence correlated with a lower word count; more <italic>we</italic> and positive emotion words; and fewer negations and negative emotion, anxiety, anger, and certainty words (<xref rid="figure1" ref-type="fig">Figure 1</xref>). Happiness showed the same relationships without word count and <italic>we</italic>. Arousal was only correlated with fewer negations and more positive emotion words. Anger showed positive correlations with negations, negative emotion words, and anger words. Anxiety was positively correlated with negative emotion, anger, and anxiety words. More sadness was associated with more negations and negative emotion, anger, and sadness words and with fewer positive emotion words. Finally, stress displayed the same correlations as sadness with anxiety instead of sadness words. At the trait level, some higher correlations arose at first but, after the FDR correction, no correlation was significant (<xref rid="figure2" ref-type="fig">Figure 2</xref>).</p>
          <fig id="figure1" position="float">
            <label>Figure 1</label>
            <caption>
              <p>Multilevel correlations between the state emotions and speech content variables (n=1015). *<italic>P</italic>&#60;.05, **<italic>P</italic>&#60;.01, ***<italic>P</italic>&#60;.001. Italicized values are significant after false discovery rate correction. Anger: anger-related words; anx: anxiety-related words; certain: absolutist words; I: first-person singular; negate: negations; negemo: negative emotion words; posemo: positive emotion words; sad: sadness-related words; swear: swear words; WC: word count; we: first-person plural; you: second-person singular.</p>
            </caption>
            <graphic xlink:href="mental_v9i2e31724_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <fig id="figure2" position="float">
            <label>Figure 2</label>
            <caption>
              <p>Spearman correlations between the trait emotions and speech content variables (n=51). *<italic>P</italic>&#60;.05, **<italic>P</italic>&#60;.01, ***<italic>P</italic>&#60;.001. Italicized values are significant after false discovery rate correction. Anger: anger-related words; anx: anxiety-related words; certain: absolutist words; I: first-person singular; negate: negations; negemo: negative emotion words; posemo: positive emotion words; sad: sadness-related words; swear: swear words; WC: word count; we: first-person plural; you: second-person singular.</p>
            </caption>
            <graphic xlink:href="mental_v9i2e31724_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>Speech Form</title>
          <p>After the FDR correction at the momentary level, <italic>P</italic>&#60;.001 for all significant correlations mentioned here. Higher valence correlated with a higher mean loudness, mean loudness rising slope, and mean loudness falling slope, and a lower mean unvoiced segment length (<xref rid="figure3" ref-type="fig">Figure 3</xref>). Happiness showed the same relationships. Arousal correlated with higher values of all 3 loudness measures and a lower mean unvoiced segment length. Anger and anxiety showed no significant correlations after FDR correction. More sadness was associated with a lower mean loudness rising slope and mean loudness falling slope. Finally, stress displayed a significant correlation with a lower F0 range. At the trait level, the correlation values again increased, but none of these were significant (<xref rid="figure4" ref-type="fig">Figure 4</xref>).</p>
          <fig id="figure3" position="float">
            <label>Figure 3</label>
            <caption>
              <p>Multilevel correlations between the state emotions and speech form variables (n=1015). *<italic>P</italic>&#60;.05, **<italic>P</italic>&#60;.01, ***<italic>P</italic>&#60;.001. Italicized values are significant after false discovery rate correction.</p>
            </caption>
            <graphic xlink:href="mental_v9i2e31724_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <fig id="figure4" position="float">
            <label>Figure 4</label>
            <caption>
              <p>Spearman correlations between the trait emotions and speech form variables (n=51). *<italic>P</italic>&#60;.05, **<italic>P</italic>&#60;.01, ***<italic>P</italic>&#60;.001. Italicized values are significant after false discovery rate correction.</p>
            </caption>
            <graphic xlink:href="mental_v9i2e31724_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>Writing Content</title>
          <p>After the FDR correction at the momentary level, <italic>P</italic>&#60;.001 for all significant correlations mentioned here. Higher valence correlated with a lower word count and less first-person singular use (<xref rid="figure5" ref-type="fig">Figure 5</xref>). Happiness only correlated with a lower word count. Arousal, anxiety, and sadness showed no significant correlations after FDR correction. More anger was associated with a higher word count. Finally, stress displayed a correlation with a higher word count and first-person singular use. At the trait level, none of the correlations were significant (<xref rid="figure6" ref-type="fig">Figure 6</xref>).</p>
          <fig id="figure5" position="float">
            <label>Figure 5</label>
            <caption>
              <p>Multilevel correlations between the state emotions and writing content variables (n=3929). *<italic>P</italic>&#60;.05, **<italic>P</italic>&#60;.01, ***<italic>P</italic>&#60;.001. Italicized values are significant after false discovery rate correction. Anger: anger-related words; anx: anxiety-related words; certain: absolutist words; exclam: exclamation marks; I: first-person singular; negate: negations; negemo: negative emotion words; posemo: positive emotion words; sad: sadness-related words; swear: swear words; WC: word count; we: first-person plural; you: second-person singular.</p>
            </caption>
            <graphic xlink:href="mental_v9i2e31724_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <fig id="figure6" position="float">
            <label>Figure 6</label>
            <caption>
              <p>Spearman correlations between the trait emotions and writing content variables (n=59). *<italic>P</italic>&#60;.05, **<italic>P</italic>&#60;.01, ***<italic>P</italic>&#60;.001. Italicized values are significant after false discovery rate correction. Anger: anger-related words; anx: anxiety-related words; certain: absolutist words; exclam: exclamation marks; I: first-person singular; negate: negations; negemo: negative emotion words; posemo: positive emotion words; sad: sadness-related words; swear: swear words; WC: word count; we: first-person plural; you: second-person singular.</p>
            </caption>
            <graphic xlink:href="mental_v9i2e31724_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>Writing Form</title>
          <p>After the FDR correction at the momentary level, <italic>P</italic>&#60;.001 for all significant correlations mentioned here. Higher valence and happiness correlated with a lower number of characters and keyboard entries (<xref rid="figure7" ref-type="fig">Figure 7</xref>). Arousal displayed a correlation with a shorter average key press duration. Anger correlated with a higher number of characters. Anxiety, sadness, and stress showed no significant correlations. At the trait level, no correlations were significant after FDR correction (<xref rid="figure8" ref-type="fig">Figure 8</xref>).</p>
          <fig id="figure7" position="float">
            <label>Figure 7</label>
            <caption>
              <p>Multilevel correlations between the state emotions and writing form variables (n=3929). *<italic>P</italic>&#60;.05, **<italic>P</italic>&#60;.01, ***<italic>P</italic>&#60;.001. Italicized values are significant after false discovery rate correction. AvgDurationKeyPress: average key press duration; Backspaces.Tot: backspaces divided by the total amount of keystrokes (characters + backspaces); nCharacters: number of characters; nEntries: number of entries; TypingDuration.Tot: typing duration divided by the total amount of keystrokes (characters + backspaces).</p>
            </caption>
            <graphic xlink:href="mental_v9i2e31724_fig7.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <fig id="figure8" position="float">
            <label>Figure 8</label>
            <caption>
              <p>Spearman correlations between the trait emotions and writing form variables (n=59). *<italic>P</italic>&#60;.05, **<italic>P</italic>&#60;.01, ***<italic>P</italic>&#60;.001. Italicized values are significant after false discovery rate correction. AvgDurationKeyPress: average key press duration; Backspaces.Tot: backspaces divided by the total amount of keystrokes (characters + backspaces); nCharacters: number of characters; nEntries: number of entries; TypingDuration.Tot: typing duration divided by the total amount of keystrokes (characters + backspaces).</p>
            </caption>
            <graphic xlink:href="mental_v9i2e31724_fig8.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
      </sec>
      <sec>
        <title>Predictive Modeling</title>
        <p>The highest predictive <italic>R</italic><sup>2</sup> at the momentary level was found for the prediction of happiness based on speech content (<italic>R</italic><sup>2</sup> mean 0.10, SD 0.04; <xref rid="figure9" ref-type="fig">Figure 9</xref>) followed by the prediction of valence based on speech content (<italic>R</italic><sup>2</sup> mean 0.06, SD 0.03) and speech form (<italic>R</italic><sup>2</sup> mean 0.05, SD 0.03). The mean <italic>R</italic><sup>2</sup> values of speech content models varied between 0.01 and 0.10, those of speech form varied between −0.01 and 0.05, those of writing content varied between 0 and 0.01, and those of writing form varied between −0.0002 and 0.01. At the trait level, the speech form models performed best, with the highest predictive <italic>R</italic><sup>2</sup> for the predictions of valence (<italic>R</italic><sup>2</sup> mean 0.16, SD 0.30), happiness (<italic>R</italic><sup>2</sup> mean 0.14, SD 0.40), and arousal (<italic>R</italic><sup>2</sup> mean 0.13, SD 0.25). All other mean predictive <italic>R</italic><sup>2</sup> values were negative except for the speech form prediction of stress (<italic>R</italic><sup>2</sup> mean 0.02, SD 0.25) and the speech content prediction of valence (<italic>R</italic><sup>2</sup> mean 0.01, SD 0.39; <xref rid="figure10" ref-type="fig">Figure 10</xref>).</p>
        <fig id="figure9" position="float">
          <label>Figure 9</label>
          <caption>
            <p>Predictive <italic>R</italic><sup>2</sup> of language data at the momentary level.</p>
          </caption>
          <graphic xlink:href="mental_v9i2e31724_fig9.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure10" position="float">
          <label>Figure 10</label>
          <caption>
            <p>Predictive <italic>R</italic><sup>2</sup> of language data at the trait level.</p>
          </caption>
          <graphic xlink:href="mental_v9i2e31724_fig10.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>Afterward, this process was repeated for the speech content and form features combined in 1 model, the writing content and form features combined in 1 model, and all 4 language features combined in 1 model (<xref rid="figure11" ref-type="fig">Figures 11</xref> and <xref rid="figure12" ref-type="fig">12</xref>). The highest predictive <italic>R</italic><sup>2</sup> at the momentary level was found for the prediction of happiness based on all language features (<italic>R</italic><sup>2</sup> mean 0.11, SD 0.04) followed by the prediction of happiness based on speech features (<italic>R</italic><sup>2</sup> mean 0.11, SD 0.06) and the prediction of valence based on all language features (<italic>R</italic><sup>2</sup> mean 0.09, SD 0.05). The mean predictive <italic>R</italic><sup>2</sup> values of speech models varied between −0.01 and 0.11, those of writing models varied between −0.02 and 0.02, and those of all features varied between −0.02 and 0.11. At the trait level, the speech models performed best, although only two of the mean predictive <italic>R</italic><sup>2</sup> values were &#62;0: the speech prediction of arousal (<italic>R</italic><sup>2</sup> mean 0.08, SD 0.50) and the altogether prediction of arousal (<italic>R</italic><sup>2</sup> mean 0.03, SD 0.49).</p>
        <fig id="figure11" position="float">
          <label>Figure 11</label>
          <caption>
            <p>Predictive <italic>R</italic><sup>2</sup> of combined language data at the momentary level.</p>
          </caption>
          <graphic xlink:href="mental_v9i2e31724_fig11.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <fig id="figure12" position="float">
          <label>Figure 12</label>
          <caption>
            <p>Predictive <italic>R</italic><sup>2</sup> of combined language data at the trait level.</p>
          </caption>
          <graphic xlink:href="mental_v9i2e31724_fig12.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <p>In this study, we investigated the potential of mobile-sensed language features as unobtrusive emotion markers. We looked at pairwise multilevel correlations between emotions or mood and language features—distinguishing between speech content, speech form, writing content, and writing form—and at the (combined) predictive performance of those features in regression models.</p>
      <sec>
        <title>Correlation Analyses and Predictive Modeling</title>
        <sec>
          <title>Speech Content</title>
          <p>Most of the significant correlations were found between speech content features and momentary emotions but were rather low, varying between &#124;0.11&#124; and &#124;0.25&#124;. However, they are in the range of those found in previous studies [<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref32">32</xref>]. Most of these significant correlations were found for state valence and happiness, which is also in line with the literature. We found that the explicit emotion LIWC dimensions had the strongest correlations but did not find evidence of a relationship between pronoun use and emotion [<xref ref-type="bibr" rid="ref52">52</xref>]. We expected to find at least some correlations with pronouns or negative emotion words at the trait level [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref29">29</xref>,<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>], but no correlations were significant after FDR correction.</p>
        </sec>
        <sec>
          <title>Speech Form</title>
          <p>Speech form and momentary emotions also displayed some significant correlations, ranging from &#124;0.11&#124; to &#124;0.23&#124;. Most of the literature has focused on discriminating high-arousal from low-arousal emotions [<xref ref-type="bibr" rid="ref33">33</xref>,<xref ref-type="bibr" rid="ref39">39</xref>-<xref ref-type="bibr" rid="ref41">41</xref>]. In this study, arousal was indeed represented, but so were valence and happiness. However, anger was not. We expected F0, loudness, and speech rate to be important; however, in this study, only the loudness measures and pause duration were notable. At the trait level, nothing was significant. This is surprising given that most of the literature on speech form is based on between-person research.</p>
        </sec>
        <sec>
          <title>Writing Content</title>
          <p>Writing content features showed only a few weak significant correlations. Varying between &#124;0.05&#124; and &#124;0.10&#124;, these were lower than expected yet not entirely surprising given the mixed results throughout previous work [<xref ref-type="bibr" rid="ref28">28</xref>,<xref ref-type="bibr" rid="ref51">51</xref>]. Valence was again the best represented; however, in contrast to speech content, the first-person singular was most notable in writing along with word count. At the trait level, the exclamation marks seemed promising at first but turned nonsignificant after FDR correction, meaning this study was not able to replicate earlier findings with anxiety and depression [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref53">53</xref>,<xref ref-type="bibr" rid="ref55">55</xref>-<xref ref-type="bibr" rid="ref60">60</xref>].</p>
        </sec>
        <sec>
          <title>Writing Form</title>
          <p>Writing form showed the smallest number of significant correlations, also in the range of &#124;0.05&#124; to &#124;0.10&#124;. In the literature, typing speed and average key press duration have been seemingly linked to emotions; however, in this study, the number of characters and the number of keyboard entries were the most telling. They followed the direction of the word count correlations. Here, again, valence and happiness showed the most correlations, and the complete trait level was nonsignificant.</p>
        </sec>
        <sec>
          <title>Predictive Modeling</title>
          <p>As could be expected based on the number of significant correlations, valence and happiness showed the highest predictive <italic>R</italic><sup>2</sup> values at the momentary level. In addition, the speech content models performed best followed by the speech form models. The predictive <italic>R</italic><sup>2</sup> estimations of the writing content and form models always stayed close to 0, although their variation was smaller (<xref rid="figure9" ref-type="fig">Figure 9</xref>). This is all in line with the previously found correlations. In addition, the size of the values followed the trend of the correlations and remained rather low—at most, 10% of people’s state emotions can be predicted based on their momentary language.</p>
          <p>When combining multiple types of momentary language data into the same models, writing does not contribute to better predictions. Combining speech content and form features yields more or less the same results as their separate models, whereas adding writing content and form features does not further improve the predictive performance. An important remark here is that not all ESM surveys with voice recordings had additional keyboard activity. Because of this, the data set was further reduced in size, which might contribute to the fact that the combined models seemingly have no added value.</p>
          <p>No significant correlations were found at the trait level. In addition, by aggregating, the number of data points was reduced from multiple observations to a single observation per participant. As a result, our expectations for trait predictive performance were lower than those for the momentary models. As can be seen in <xref rid="figure10" ref-type="fig">Figures 10</xref> and <xref rid="figure12" ref-type="fig">12</xref>, the estimations of the predictive <italic>R</italic><sup>2</sup> based on varying training and test sets show a larger variation than those of the momentary models. Moreover, they are clustered around 0 with numerous negative outliers, indicating regular overfitting of the training set. There was one type of data that performed better than the others: &#62;75% of the predictive <italic>R</italic><sup>2</sup> estimations based on the speech form models for valence, arousal, anxiety, and happiness performed &#62;0, indicating at least some predictive value.</p>
          <p>Overall, the relationships found were largely in the predicted direction but were very modest in size. For speech, these values are more or less in line with previously obtained results; however, writing performed below expectations. There are 3 main differences between voice recordings and keyboard activity that might account for this. The first is the nature of collection—voice recordings were deliberately voiced, whereas keyboard activity was unobtrusively recorded. Second, keyboard activity was gathered without any instruction, whereas voice recordings came with the explicit instruction for the participants to say what they were doing and how they felt. Finally, although LIWC was able to categorize on average 87.17% (SD 38.83%) of the spoken words, it only recognized on average 54.23% (SD 25.81%) of the text messages because of typos and other distortions.</p>
          <p>A second dichotomy exists between the momentary and trait values. Previous work has often focused on between-group designs; however, this study could only record significant within-person correlations. At the trait level, we found no significant correlations, and predictive trait models showed more variability and 0 or negative predictive <italic>R</italic><sup>2</sup> values. Possibly, by aggregating the emotion and language data, important context data of their relationship were lost, and moment-to-moment tendencies were flattened out. For predictive modeling, trait level also meant a reduction in data points to train and test a model. The repeated redistribution of a small number of participants over the training and test sets will induce larger changes than a larger data set. Furthermore, momentary-level models are trained and tested within persons, whereas trait-level models are trained and tested between persons. The overfitting of predictive models at the trait level suggests that the participants’ emotions and language use were too dissimilar to be encapsulated in 1 model (except perhaps for speech form).</p>
        </sec>
      </sec>
      <sec>
        <title>Limitations and Future Directions</title>
        <p>The first limitation entails that data collection was dependent on the participants’ willingness to use the custom-made keyboard instead of their default one and to make recordings. This reduced the number of observations and created an unbalanced data set. Ideally, the smartphone’s own keyboard and microphone could be activated and logged at will. This is impossible because of technical and ethical constraints. A solution might be to link reimbursement directly to the provision of valid data in the form of keyboard use or voice recordings, although this might create too strong a perception of coercion.</p>
        <p>A second limitation lies in the software used. We worked with LIWC as it is widely used in the literature and provides a fast and easy-to-use interface. A downside is that it only recognizes single words and not phrases. When the participants talked about feeling <italic>not too happy</italic>, LIWC scored this as a positive emotion and a negation. When looking at the correlations, this did not seem to pose a direct problem in this study, although it could add noise and reduce statistical significance. What might be more problematic is the language the participants used in their texting: abbreviations, typos, and neologisms. Although LIWC 2015 has a netspeak dimension, the average word recognition of writing was only 54.23% (SD 25.81%). In future studies, one might consider preprocessing all writing by hand, although this will be a very time-consuming task.</p>
        <p>A third limitation is inherent to the sample of participants. Despite the strong representation of depression and language use in the literature, this study was not able to link depressive symptoms to any language feature. Replicating this study in a more diverse or clinical population might yield other results for depression.</p>
        <p>Finally, language is strongly dependent on the chosen medium. Talking to a smartphone with a specific instruction restrains the natural flow of language and can compromise the generalizability of these findings. More technically, this also means that the participants would sometimes talk softly in a quiet room, whereas they might be screaming over the noise in another recording. We should keep in mind the fact that loudness is as much a factor of the environment as it is of the voice. Then again, this context might also say something about the emotional experience in itself.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>This study investigated the relationship between self-reported emotions and 4 types of mobile-sensed language. The correlations and predictive performances found were weak overall, remaining &#60;0.25. The best-performing language type was speech content, which displayed the largest number of significant correlations and the largest predictive <italic>R</italic><sup>2</sup> values at the momentary level, followed by speech form. At the trait level, no significant correlations were found, resulting in unreliable predictive models. Only speech form models were able to reach a mean predictive <italic>R</italic><sup>2</sup> value &#62;0 at the trait level. Among the studied emotions, valence and happiness showed the most significant correlations and predictability. In conclusion, this means that the potential of this particular set of mobile-sensed language features as emotion markers, although promising, remains rather low.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">DASS</term>
          <def>
            <p>Depression, Anxiety, and Stress Scale</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">ESM</term>
          <def>
            <p>experience sampling method</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">F0</term>
          <def>
            <p>fundamental frequency</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">FDR</term>
          <def>
            <p>false discovery rate</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">LIWC</term>
          <def>
            <p>Linguistic Inquiry and Word Count</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>The research reported in this paper is supported by Katholieke Universiteit Leuven Research Council grants C14/19/054 and C3/20/005 and by the European Research Council under the Horizon 2020 research and innovation program of the European Union and the European Research Council Consolidator grant SONORA (773268). This paper reflects only the authors’ views, and the Union is not liable for any use that may be made of the contained information.</p>
    </ack>
    <fn-group>
      <fn fn-type="con">
        <p>CC contributed to writing and analysis. KN contributed to writing and analysis. MM contributed to the methodology and investigation. MB contributed to review, editing, and data curation. PV contributed to review, editing, and data curation. LG contributed to review and editing, methodology, and investigation. TvW contributed to review and editing, methodology, and investigation. PK contributed to writing and analysis.</p>
      </fn>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Frijda</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>The laws of emotion</article-title>
          <source>Am Psychol</source>
          <year>1988</year>
          <month>05</month>
          <volume>43</volume>
          <issue>5</issue>
          <fpage>349</fpage>
          <lpage>58</lpage>
          <pub-id pub-id-type="doi">10.1037//0003-066x.43.5.349</pub-id>
          <pub-id pub-id-type="medline">3389582</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Houben</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Van Den Noortgate</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Kuppens</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>The relation between short-term emotion dynamics and psychological well-being: a meta-analysis</article-title>
          <source>Psychol Bull</source>
          <year>2015</year>
          <month>07</month>
          <volume>141</volume>
          <issue>4</issue>
          <fpage>901</fpage>
          <lpage>30</lpage>
          <pub-id pub-id-type="doi">10.1037/a0038822</pub-id>
          <pub-id pub-id-type="medline">25822133</pub-id>
          <pub-id pub-id-type="pii">2015-13569-001</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kuppens</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Verduyn</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Emotion dynamics</article-title>
          <source>Curr Opin Psychol</source>
          <year>2017</year>
          <month>10</month>
          <volume>17</volume>
          <fpage>22</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1016/j.copsyc.2017.06.004</pub-id>
          <pub-id pub-id-type="medline">28950968</pub-id>
          <pub-id pub-id-type="pii">S2352-250X(16)30201-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fredrickson</surname>
              <given-names>BL</given-names>
            </name>
          </person-group>
          <article-title>Cultivating positive emotions to optimize health and well-being</article-title>
          <source>Prevention Treatment</source>
          <year>2000</year>
          <volume>3</volume>
          <issue>1</issue>
          <pub-id pub-id-type="doi">10.1037/1522-3736.3.1.31a</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Scherer</surname>
              <given-names>KR</given-names>
            </name>
          </person-group>
          <article-title>The dynamic architecture of emotion: evidence for the component process model</article-title>
          <source>Cognit Emotion</source>
          <year>2009</year>
          <month>11</month>
          <volume>23</volume>
          <issue>7</issue>
          <fpage>1307</fpage>
          <lpage>51</lpage>
          <pub-id pub-id-type="doi">10.1080/02699930902928969</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ebner-Priemer</surname>
              <given-names>UW</given-names>
            </name>
            <name name-style="western">
              <surname>Eid</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kleindienst</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Stabenow</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Trull</surname>
              <given-names>TJ</given-names>
            </name>
          </person-group>
          <article-title>Analytic strategies for understanding affective (in)stability and other dynamic processes in psychopathology</article-title>
          <source>J Abnorm Psychol</source>
          <year>2009</year>
          <month>02</month>
          <volume>118</volume>
          <issue>1</issue>
          <fpage>195</fpage>
          <lpage>202</lpage>
          <pub-id pub-id-type="doi">10.1037/a0014868</pub-id>
          <pub-id pub-id-type="medline">19222325</pub-id>
          <pub-id pub-id-type="pii">2009-01738-010</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Consolvo</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Walker</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Using the experience sampling method to evaluate ubicomp applications</article-title>
          <source>IEEE Pervasive Comput</source>
          <year>2003</year>
          <month>04</month>
          <volume>2</volume>
          <issue>2</issue>
          <fpage>24</fpage>
          <lpage>31</lpage>
          <pub-id pub-id-type="doi">10.1109/MPRV.2003.1203750</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Intille</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Rondoni</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kukla</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Ancona</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Bao</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>A context-aware experience sampling tool</article-title>
          <source>Proceedings of the CHI '03 Extended Abstracts on Human Factors in Computing Systems</source>
          <year>2003</year>
          <conf-name>CHI '03 Extended Abstracts on Human Factors in Computing Systems</conf-name>
          <conf-date>Apr 5 - 10, 2003</conf-date>
          <conf-loc>Ft. Lauderdale Florida USA</conf-loc>
          <pub-id pub-id-type="doi">10.1145/765891.766101</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Scollon</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Prieto</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Diener</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Experience sampling: promises and pitfalls, strength and weaknesses</article-title>
          <source>Assessing Well-Being</source>
          <year>2009</year>
          <publisher-loc>Dordrecht</publisher-loc>
          <publisher-name>Springer</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kahneman</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Krueger</surname>
              <given-names>AB</given-names>
            </name>
            <name name-style="western">
              <surname>Schkade</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Schwarz</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Stone</surname>
              <given-names>AA</given-names>
            </name>
          </person-group>
          <article-title>A survey method for characterizing daily life experience: the day reconstruction method</article-title>
          <source>Science</source>
          <year>2004</year>
          <month>12</month>
          <day>03</day>
          <volume>306</volume>
          <issue>5702</issue>
          <fpage>1776</fpage>
          <lpage>80</lpage>
          <pub-id pub-id-type="doi">10.1126/science.1103572</pub-id>
          <pub-id pub-id-type="medline">15576620</pub-id>
          <pub-id pub-id-type="pii">306/5702/1776</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mehrotra</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Vermeulen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Pejovic</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Musolesi</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Ask, but don't interrupt: the case for interruptibility-aware mobile experience sampling</article-title>
          <source>Proceedings of the UbiComp '15: The 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing</source>
          <year>2015</year>
          <conf-name>UbiComp '15: The 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing</conf-name>
          <conf-date>Sep 7 - 11, 2015</conf-date>
          <conf-loc>Osaka Japan</conf-loc>
          <pub-id pub-id-type="doi">10.1145/2800835.2804397</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lieberman</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Inagaki</surname>
              <given-names>TK</given-names>
            </name>
            <name name-style="western">
              <surname>Tabibnia</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Crockett</surname>
              <given-names>MJ</given-names>
            </name>
          </person-group>
          <article-title>Subjective responses to emotional stimuli during labeling, reappraisal, and distraction</article-title>
          <source>Emotion</source>
          <year>2011</year>
          <month>06</month>
          <volume>11</volume>
          <issue>3</issue>
          <fpage>468</fpage>
          <lpage>80</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/21534661"/>
          </comment>
          <pub-id pub-id-type="doi">10.1037/a0023503</pub-id>
          <pub-id pub-id-type="medline">21534661</pub-id>
          <pub-id pub-id-type="pii">2011-08959-001</pub-id>
          <pub-id pub-id-type="pmcid">PMC3444304</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Onnela</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Rauch</surname>
              <given-names>SL</given-names>
            </name>
          </person-group>
          <article-title>Harnessing smartphone-based digital phenotyping to enhance behavioral and mental health</article-title>
          <source>Neuropsychopharmacology</source>
          <year>2016</year>
          <month>06</month>
          <volume>41</volume>
          <issue>7</issue>
          <fpage>1691</fpage>
          <lpage>6</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/26818126"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/npp.2016.7</pub-id>
          <pub-id pub-id-type="medline">26818126</pub-id>
          <pub-id pub-id-type="pii">npp20167</pub-id>
          <pub-id pub-id-type="pmcid">PMC4869063</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mohr</surname>
              <given-names>DC</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Schueller</surname>
              <given-names>SM</given-names>
            </name>
          </person-group>
          <article-title>Personal sensing: understanding mental health using ubiquitous sensors and machine learning</article-title>
          <source>Annu Rev Clin Psychol</source>
          <year>2017</year>
          <month>05</month>
          <day>08</day>
          <volume>13</volume>
          <fpage>23</fpage>
          <lpage>47</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/28375728"/>
          </comment>
          <pub-id pub-id-type="doi">10.1146/annurev-clinpsy-032816-044949</pub-id>
          <pub-id pub-id-type="medline">28375728</pub-id>
          <pub-id pub-id-type="pmcid">PMC6902121</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Harari</surname>
              <given-names>GM</given-names>
            </name>
            <name name-style="western">
              <surname>Lane</surname>
              <given-names>ND</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Crosier</surname>
              <given-names>BS</given-names>
            </name>
            <name name-style="western">
              <surname>Campbell</surname>
              <given-names>AT</given-names>
            </name>
            <name name-style="western">
              <surname>Gosling</surname>
              <given-names>SD</given-names>
            </name>
          </person-group>
          <article-title>Using smartphones to collect behavioral data in psychological science: opportunities, practical considerations, and challenges</article-title>
          <source>Perspect Psychol Sci</source>
          <year>2016</year>
          <month>11</month>
          <volume>11</volume>
          <issue>6</issue>
          <fpage>838</fpage>
          <lpage>54</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/27899727"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/1745691616650285</pub-id>
          <pub-id pub-id-type="medline">27899727</pub-id>
          <pub-id pub-id-type="pii">11/6/838</pub-id>
          <pub-id pub-id-type="pmcid">PMC5572675</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Adler</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Ben-Zeev</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Tseng</surname>
              <given-names>VW</given-names>
            </name>
            <name name-style="western">
              <surname>Kane</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Brian</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Campbell</surname>
              <given-names>AT</given-names>
            </name>
            <name name-style="western">
              <surname>Hauser</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Scherer</surname>
              <given-names>EA</given-names>
            </name>
            <name name-style="western">
              <surname>Choudhury</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Predicting early warning signs of psychotic relapse from passive sensing data: an approach using encoder-decoder neural networks</article-title>
          <source>JMIR Mhealth Uhealth</source>
          <year>2020</year>
          <month>08</month>
          <day>31</day>
          <volume>8</volume>
          <issue>8</issue>
          <fpage>e19962</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mhealth.jmir.org/2020/8/e19962/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/19962</pub-id>
          <pub-id pub-id-type="medline">32865506</pub-id>
          <pub-id pub-id-type="pii">v8i8e19962</pub-id>
          <pub-id pub-id-type="pmcid">PMC7490673</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cai</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Boukhechba</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Chow</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Teachman</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Barnes</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Gerber</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>State affect recognition using smartphone sensing data</article-title>
          <source>Proceedings of the 2018 IEEE/ACM International Conference on Connected Health: Applications, Systems and Engineering Technologies</source>
          <year>2018</year>
          <conf-name>CHASE '18: Proceedings of the 2018 IEEE/ACM International Conference on Connected Health: Applications, Systems and Engineering Technologies</conf-name>
          <conf-date>Sep 26 - 28, 2018</conf-date>
          <conf-loc>Washington DC</conf-loc>
          <pub-id pub-id-type="doi">10.1145/3278576.3284386</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sultana</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Al-Jefri</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Using machine learning and smartphone and smartwatch data to detect emotional states and transitions: exploratory study</article-title>
          <source>JMIR Mhealth Uhealth</source>
          <year>2020</year>
          <month>09</month>
          <day>29</day>
          <volume>8</volume>
          <issue>9</issue>
          <fpage>e17818</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://mhealth.jmir.org/2020/9/e17818/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/17818</pub-id>
          <pub-id pub-id-type="medline">32990638</pub-id>
          <pub-id pub-id-type="pii">v8i9e17818</pub-id>
          <pub-id pub-id-type="pmcid">PMC7584158</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Aung</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Abdullah</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Brian</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Campbell</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Choudhury</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Hauser</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kane</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Merrill</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Scherer</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Tseng</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Ben-Zeev</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Toward passive sensing detection of mental health changes in people with schizophrenia</article-title>
          <source>Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing</source>
          <year>2016</year>
          <conf-name>UbiComp '16: Proceedings of the 2016 ACM International Joint Conference on Pervasive and Ubiquitous Computing</conf-name>
          <conf-date>Sep 12 - 16, 2016</conf-date>
          <conf-loc>Heidelberg Germany</conf-loc>
          <pub-id pub-id-type="doi">10.1145/2971648.2971740</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hanzo</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Global system for mobile communications (GSM)</article-title>
          <source>Scholarpedia</source>
          <year>2008</year>
          <volume>3</volume>
          <issue>8</issue>
          <fpage>4115</fpage>
          <pub-id pub-id-type="doi">10.4249/scholarpedia.4115</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jablonka</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Ginsburg</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Dor</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>The co-evolution of language and emotions</article-title>
          <source>Philos Trans R Soc Lond B Biol Sci</source>
          <year>2012</year>
          <month>08</month>
          <day>05</day>
          <volume>367</volume>
          <issue>1599</issue>
          <fpage>2152</fpage>
          <lpage>9</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/22734058"/>
          </comment>
          <pub-id pub-id-type="doi">10.1098/rstb.2012.0117</pub-id>
          <pub-id pub-id-type="medline">22734058</pub-id>
          <pub-id pub-id-type="pii">rstb.2012.0117</pub-id>
          <pub-id pub-id-type="pmcid">PMC3385682</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pennebaker</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Mehl</surname>
              <given-names>MR</given-names>
            </name>
            <name name-style="western">
              <surname>Niederhoffer</surname>
              <given-names>KG</given-names>
            </name>
          </person-group>
          <article-title>Psychological aspects of natural language use: our words, our selves</article-title>
          <source>Annu Rev Psychol</source>
          <year>2003</year>
          <volume>54</volume>
          <fpage>547</fpage>
          <lpage>77</lpage>
          <pub-id pub-id-type="doi">10.1146/annurev.psych.54.101601.145041</pub-id>
          <pub-id pub-id-type="medline">12185209</pub-id>
          <pub-id pub-id-type="pii">101601.145041</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tausczik</surname>
              <given-names>YR</given-names>
            </name>
            <name name-style="western">
              <surname>Pennebaker</surname>
              <given-names>JW</given-names>
            </name>
          </person-group>
          <article-title>The psychological meaning of words: LIWC and computerized text analysis methods</article-title>
          <source>J Language Soc Psychol</source>
          <year>2009</year>
          <month>12</month>
          <day>08</day>
          <volume>29</volume>
          <issue>1</issue>
          <fpage>24</fpage>
          <lpage>54</lpage>
          <pub-id pub-id-type="doi">10.1177/0261927X09351676</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Epp</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Lippold</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Mandryk</surname>
              <given-names>R</given-names>
            </name>
          </person-group>
          <article-title>Identifying emotional states using keystroke dynamics</article-title>
          <source>Proceedings of the SIGCHI Conference on Human Factors in Computing Systems</source>
          <year>2011</year>
          <conf-name>CHI '11: Proceedings of the SIGCHI Conference on Human Factors in Computing Systems</conf-name>
          <conf-date>May 7 - 12, 2011</conf-date>
          <conf-loc>Vancouver BC Canada</conf-loc>
          <pub-id pub-id-type="doi">10.1145/1978942.1979046</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Juslin</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Scherer</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Speech emotion analysis</article-title>
          <source>Scholarpedia</source>
          <year>2008</year>
          <volume>3</volume>
          <issue>10</issue>
          <fpage>4240</fpage>
          <pub-id pub-id-type="doi">10.4249/scholarpedia.4240</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kappas</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hess</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Scherer</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Voice and emotion</article-title>
          <source>Fundamentals of Nonverbal Behavior</source>
          <year>1991</year>
          <publisher-loc>Cambridge, UK</publisher-loc>
          <publisher-name>Cambridge University Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Engberink</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>De relatie tussen emotie-expressie, taalgebruik en autobiografische herinneringen : bij gezonde ouderen</article-title>
          <source>University of Twente</source>
          <access-date>2022-02-01</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://essay.utwente.nl/79239/">http://essay.utwente.nl/79239/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kahn</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Tobin</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Massey</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Anderson</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Measuring emotional expression with the Linguistic Inquiry and Word Count</article-title>
          <source>Am J Psychol</source>
          <year>2007</year>
          <volume>120</volume>
          <issue>2</issue>
          <fpage>263</fpage>
          <lpage>86</lpage>
          <pub-id pub-id-type="medline">17650921</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cohen</surname>
              <given-names>AS</given-names>
            </name>
            <name name-style="western">
              <surname>Minor</surname>
              <given-names>KS</given-names>
            </name>
            <name name-style="western">
              <surname>Baillie</surname>
              <given-names>LE</given-names>
            </name>
            <name name-style="western">
              <surname>Dahir</surname>
              <given-names>AM</given-names>
            </name>
          </person-group>
          <article-title>Clarifying the linguistic signature: measuring personality from natural speech</article-title>
          <source>J Pers Assess</source>
          <year>2008</year>
          <month>11</month>
          <volume>90</volume>
          <issue>6</issue>
          <fpage>559</fpage>
          <lpage>63</lpage>
          <pub-id pub-id-type="doi">10.1080/00223890802388459</pub-id>
          <pub-id pub-id-type="medline">18925496</pub-id>
          <pub-id pub-id-type="pii">904257202</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Schwartz</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Son</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Kern</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Vazire</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>The language of well-being: tracking fluctuations in emotion experience through everyday speech</article-title>
          <source>J Pers Soc Psychol</source>
          <year>2020</year>
          <month>02</month>
          <volume>118</volume>
          <issue>2</issue>
          <fpage>364</fpage>
          <lpage>87</lpage>
          <pub-id pub-id-type="doi">10.1037/pspp0000244</pub-id>
          <pub-id pub-id-type="medline">30945904</pub-id>
          <pub-id pub-id-type="pii">2019-18075-001</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Capecelatro</surname>
              <given-names>MR</given-names>
            </name>
            <name name-style="western">
              <surname>Sacchet</surname>
              <given-names>MD</given-names>
            </name>
            <name name-style="western">
              <surname>Hitchcock</surname>
              <given-names>PF</given-names>
            </name>
            <name name-style="western">
              <surname>Miller</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Britton</surname>
              <given-names>WB</given-names>
            </name>
          </person-group>
          <article-title>Major depression duration reduces appetitive word use: an elaborated verbal recall of emotional photographs</article-title>
          <source>J Psychiatr Res</source>
          <year>2013</year>
          <month>06</month>
          <volume>47</volume>
          <issue>6</issue>
          <fpage>809</fpage>
          <lpage>15</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/23510497"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jpsychires.2013.01.022</pub-id>
          <pub-id pub-id-type="medline">23510497</pub-id>
          <pub-id pub-id-type="pii">S0022-3956(13)00045-9</pub-id>
          <pub-id pub-id-type="pmcid">PMC3732741</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tackman</surname>
              <given-names>AM</given-names>
            </name>
            <name name-style="western">
              <surname>Sbarra</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Carey</surname>
              <given-names>AL</given-names>
            </name>
            <name name-style="western">
              <surname>Donnellan</surname>
              <given-names>MB</given-names>
            </name>
            <name name-style="western">
              <surname>Horn</surname>
              <given-names>AB</given-names>
            </name>
            <name name-style="western">
              <surname>Holtzman</surname>
              <given-names>NS</given-names>
            </name>
            <name name-style="western">
              <surname>Edwards</surname>
              <given-names>TS</given-names>
            </name>
            <name name-style="western">
              <surname>Pennebaker</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Mehl</surname>
              <given-names>MR</given-names>
            </name>
          </person-group>
          <article-title>Depression, negative emotionality, and self-referential language: a multi-lab, multi-measure, and multi-language-task research synthesis</article-title>
          <source>J Pers Soc Psychol</source>
          <year>2019</year>
          <month>05</month>
          <volume>116</volume>
          <issue>5</issue>
          <fpage>817</fpage>
          <lpage>34</lpage>
          <pub-id pub-id-type="doi">10.1037/pspp0000187</pub-id>
          <pub-id pub-id-type="medline">29504797</pub-id>
          <pub-id pub-id-type="pii">2018-09035-001</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Johnstone</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Scherer</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Vocal communication of emotion</article-title>
          <source>The Handbook of Emotions</source>
          <year>2000</year>
          <publisher-loc>New York</publisher-loc>
          <publisher-name>Guilford</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Faurholt-Jepsen</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Busk</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Frost</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Vinberg</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Christensen</surname>
              <given-names>EM</given-names>
            </name>
            <name name-style="western">
              <surname>Winther</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Bardram</surname>
              <given-names>JE</given-names>
            </name>
            <name name-style="western">
              <surname>Kessing</surname>
              <given-names>LV</given-names>
            </name>
          </person-group>
          <article-title>Voice analysis as an objective state marker in bipolar disorder</article-title>
          <source>Transl Psychiatry</source>
          <year>2016</year>
          <month>07</month>
          <day>19</day>
          <volume>6</volume>
          <fpage>e856</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/tp.2016.123"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/tp.2016.123</pub-id>
          <pub-id pub-id-type="medline">27434490</pub-id>
          <pub-id pub-id-type="pii">tp2016123</pub-id>
          <pub-id pub-id-type="pmcid">PMC5545710</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>France</surname>
              <given-names>DJ</given-names>
            </name>
            <name name-style="western">
              <surname>Shiavi</surname>
              <given-names>RG</given-names>
            </name>
            <name name-style="western">
              <surname>Silverman</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Silverman</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wilkes</surname>
              <given-names>DM</given-names>
            </name>
          </person-group>
          <article-title>Acoustical properties of speech as indicators of depression and suicidal risk</article-title>
          <source>IEEE Trans Biomed Eng</source>
          <year>2000</year>
          <month>07</month>
          <volume>47</volume>
          <issue>7</issue>
          <fpage>829</fpage>
          <lpage>37</lpage>
          <pub-id pub-id-type="doi">10.1109/10.846676</pub-id>
          <pub-id pub-id-type="medline">10916253</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Marchi</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Eyben</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Hagerer</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Schuller</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Real-time tracking of speakers’ emotions, states, and traits on mobile platforms</article-title>
          <source>Proceedings of the INTERSPEECH 2016: Show &#38; Tell Contribution</source>
          <year>2016</year>
          <conf-name>INTERSPEECH 2016</conf-name>
          <conf-date>Sep 8–12, 2016</conf-date>
          <conf-loc>San Francisco, USA</conf-loc>
          <publisher-name>ISCA</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Maxhuni</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Muñoz-Meléndez</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Osmani</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Perez</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Mayora</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Morales</surname>
              <given-names>EF</given-names>
            </name>
          </person-group>
          <article-title>Classification of bipolar disorder episodes based on analysis of voice and motor activity of patients</article-title>
          <source>Pervasive Mobile Comput</source>
          <year>2016</year>
          <month>09</month>
          <volume>31</volume>
          <fpage>50</fpage>
          <lpage>66</lpage>
          <pub-id pub-id-type="doi">10.1016/j.pmcj.2016.01.008</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Muaremi</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Gravenhorst</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Grünerbl</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Arnrich</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Tröster</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Assessing bipolar episodes using speech cues derived from phone calls</article-title>
          <source>Pervasive Computing Paradigms for Mental Health</source>
          <year>2014</year>
          <month>09</month>
          <day>26</day>
          <publisher-loc>Cham</publisher-loc>
          <publisher-name>Springer International Publishing</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bachorowski</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Vocal expression and perception of emotion</article-title>
          <source>Curr Dir Psychol Sci</source>
          <year>2016</year>
          <month>06</month>
          <day>24</day>
          <volume>8</volume>
          <issue>2</issue>
          <fpage>53</fpage>
          <lpage>7</lpage>
          <pub-id pub-id-type="doi">10.1111/1467-8721.00013</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Murray</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Arnott</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Toward the simulation of emotion in synthetic speech: a review of the literature on human vocal emotion</article-title>
          <source>J Acoust Soc Am</source>
          <year>1993</year>
          <month>02</month>
          <volume>93</volume>
          <issue>2</issue>
          <fpage>1097</fpage>
          <lpage>108</lpage>
          <pub-id pub-id-type="doi">10.1121/1.405558</pub-id>
          <pub-id pub-id-type="medline">8445120</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Scherer</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Vocal communication of emotion: a review of research paradigms</article-title>
          <source>Speech Commun</source>
          <year>2003</year>
          <month>04</month>
          <volume>40</volume>
          <issue>1-2</issue>
          <fpage>227</fpage>
          <lpage>56</lpage>
          <pub-id pub-id-type="doi">10.1016/s0167-6393(02)00084-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bryant</surname>
              <given-names>GA</given-names>
            </name>
          </person-group>
          <article-title>The evolution of human vocal emotion</article-title>
          <source>Emotion Rev</source>
          <year>2020</year>
          <month>06</month>
          <day>24</day>
          <volume>13</volume>
          <issue>1</issue>
          <fpage>25</fpage>
          <lpage>33</lpage>
          <pub-id pub-id-type="doi">10.1177/1754073920930791</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ramakrishnan</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Recognition of emotion from speech: a review</article-title>
          <source>InTech Open</source>
          <access-date>2022-02-02</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://cdn.intechopen.com/pdfs/31885/InTech-Recognition_of_emotion_from_speech_a_review.pdf">https://cdn.intechopen.com/pdfs/31885/InTech-Recognition_of_emotion_from_speech_a_review.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Filippi</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Congdon</surname>
              <given-names>JV</given-names>
            </name>
            <name name-style="western">
              <surname>Hoang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Bowling</surname>
              <given-names>DL</given-names>
            </name>
            <name name-style="western">
              <surname>Reber</surname>
              <given-names>SA</given-names>
            </name>
            <name name-style="western">
              <surname>Pašukonis</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hoeschele</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ocklenburg</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>de Boer</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Sturdy</surname>
              <given-names>CB</given-names>
            </name>
            <name name-style="western">
              <surname>Newen</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Güntürkün</surname>
              <given-names>O</given-names>
            </name>
          </person-group>
          <article-title>Humans recognize emotional arousal in vocalizations across all classes of terrestrial vertebrates: evidence for acoustic universals</article-title>
          <source>Proc Biol Sci</source>
          <year>2017</year>
          <month>07</month>
          <day>26</day>
          <volume>284</volume>
          <issue>1859</issue>
          <fpage>20170990</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/28747478"/>
          </comment>
          <pub-id pub-id-type="doi">10.1098/rspb.2017.0990</pub-id>
          <pub-id pub-id-type="medline">28747478</pub-id>
          <pub-id pub-id-type="pii">rspb.2017.0990</pub-id>
          <pub-id pub-id-type="pmcid">PMC5543225</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ozdas</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Shiavi</surname>
              <given-names>RG</given-names>
            </name>
            <name name-style="western">
              <surname>Silverman</surname>
              <given-names>SE</given-names>
            </name>
            <name name-style="western">
              <surname>Silverman</surname>
              <given-names>MK</given-names>
            </name>
            <name name-style="western">
              <surname>Wilkes</surname>
              <given-names>DM</given-names>
            </name>
          </person-group>
          <article-title>Investigation of vocal jitter and glottal flow spectrum as possible cues for depression and near-term suicidal risk</article-title>
          <source>IEEE Trans Biomed Eng</source>
          <year>2004</year>
          <month>09</month>
          <volume>51</volume>
          <issue>9</issue>
          <fpage>1530</fpage>
          <lpage>40</lpage>
          <pub-id pub-id-type="doi">10.1109/TBME.2004.827544</pub-id>
          <pub-id pub-id-type="medline">15376501</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Schuller</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Steidl</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Batliner</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Vinciarelli</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Scherer</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Ringeval</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Chetouani</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Weninger</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Eyben</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Marchi</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Mortillaro</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Salamin</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Polychroniou</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Valente</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>The INTERSPEECH 2013 computational paralinguistics challenge: social signals, conflict, emotion, autism</article-title>
          <source>Proceedings of the INTERSPEECH 2013: 14th Annual Conference of the International Speech Communication Association</source>
          <year>2013</year>
          <conf-name>Proceedings of the INTERSPEECH 2013: 14th Annual Conference of the International Speech Communication Association</conf-name>
          <conf-date>Aug 25-29, 2013</conf-date>
          <conf-loc>Lyon, France</conf-loc>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://hal.sorbonne-universite.fr/hal-02423147"/>
          </comment>
          <pub-id pub-id-type="doi">10.21437/interspeech.2013-56</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tzirakis</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Schuller</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>End-to-end speech emotion recognition using deep neural networks</article-title>
          <source>Proceedings of the 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)</source>
          <year>2018</year>
          <conf-name>ICASSP 2018 - 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)</conf-name>
          <conf-date>Apr 15-20, 2018</conf-date>
          <conf-loc>Calgary, AB, Canada</conf-loc>
          <pub-id pub-id-type="doi">10.1109/icassp.2018.8462677</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Scherer</surname>
              <given-names>KR</given-names>
            </name>
          </person-group>
          <article-title>Vocal markers of emotion: comparing induction and acting elicitation</article-title>
          <source>Comput Speech Language</source>
          <year>2013</year>
          <month>1</month>
          <volume>27</volume>
          <issue>1</issue>
          <fpage>40</fpage>
          <lpage>58</lpage>
          <pub-id pub-id-type="doi">10.1016/j.csl.2011.11.003</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gill</surname>
              <given-names>AJ</given-names>
            </name>
            <name name-style="western">
              <surname>French</surname>
              <given-names>RM</given-names>
            </name>
            <name name-style="western">
              <surname>Gergle</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Oberlander</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Identifying emotional characteristics from short blog texts</article-title>
          <source>Proceedings for the 30th Annual Meeting of the Cognitive Science Society</source>
          <year>2008</year>
          <conf-name>30th Annual Meeting of the Cognitive Science Society</conf-name>
          <conf-date>Jul 23-26, 2008</conf-date>
          <conf-loc>Washington DC, USA</conf-loc>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://csjarchive.cogsci.rpi.edu/proceedings/2008/pdfs/p2237.pdf"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hancock</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Landrigan</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Silver</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Expressing emotion in text-based communication</article-title>
          <source>Proceedings of the SIGCHI Conference on Human Factors in Computing Systems</source>
          <year>2007</year>
          <conf-name>CHI07: CHI Conference on Human Factors in Computing Systems</conf-name>
          <conf-date>Apr 28 - May 7, 2007</conf-date>
          <conf-loc>San Jose, California, USA</conf-loc>
          <pub-id pub-id-type="doi">10.1145/1240624.1240764</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tov</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Ng</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Qiu</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Detecting well-being via computerized content analysis of brief diary entries</article-title>
          <source>Psychol Assess</source>
          <year>2013</year>
          <month>12</month>
          <volume>25</volume>
          <issue>4</issue>
          <fpage>1069</fpage>
          <lpage>78</lpage>
          <pub-id pub-id-type="doi">10.1037/a0033007</pub-id>
          <pub-id pub-id-type="medline">23730828</pub-id>
          <pub-id pub-id-type="pii">2013-19093-001</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pennebaker</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <source>The Secret Life of Pronouns: What Our Words Say About Us</source>
          <year>2013</year>
          <publisher-loc>London, United Kingdom</publisher-loc>
          <publisher-name>Bloomsbury Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Al-Mosaiwi</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Johnstone</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>In an absolute state: elevated use of absolutist words is a marker specific to anxiety, depression, and suicidal ideation</article-title>
          <source>Clin Psychol Sci</source>
          <year>2018</year>
          <month>07</month>
          <volume>6</volume>
          <issue>4</issue>
          <fpage>529</fpage>
          <lpage>42</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/10.1177/2167702617747074?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub%3dpubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/2167702617747074</pub-id>
          <pub-id pub-id-type="medline">30886766</pub-id>
          <pub-id pub-id-type="pii">10.1177_2167702617747074</pub-id>
          <pub-id pub-id-type="pmcid">PMC6376956</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Settanni</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Marengo</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Sharing feelings online: studying emotional well-being via automated text analysis of Facebook posts</article-title>
          <source>Front Psychol</source>
          <year>2015</year>
          <volume>6</volume>
          <fpage>1045</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.3389/fpsyg.2015.01045"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fpsyg.2015.01045</pub-id>
          <pub-id pub-id-type="medline">26257692</pub-id>
          <pub-id pub-id-type="pmcid">PMC4512028</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref55">
        <label>55</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Brockmeyer</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Zimmermann</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kulessa</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Hautzinger</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Bents</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Friederich</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Herzog</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Backenstrass</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Me, myself, and I: self-referent word use as an indicator of self-focused attention in relation to depression and anxiety</article-title>
          <source>Front Psychol</source>
          <year>2015</year>
          <volume>6</volume>
          <fpage>1564</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.3389/fpsyg.2015.01564"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fpsyg.2015.01564</pub-id>
          <pub-id pub-id-type="medline">26500601</pub-id>
          <pub-id pub-id-type="pmcid">PMC4598574</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref56">
        <label>56</label>
        <nlm-citation citation-type="web">
          <article-title>The Secret Life of Pronouns: James Pennebaker at TEDxAustin</article-title>
          <source>YouTube - TED</source>
          <access-date>2022-02-02</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.youtube.com/watch?v=PGsQwAu3PzU">https://www.youtube.com/watch?v=PGsQwAu3PzU</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref57">
        <label>57</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Rude</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Gortner</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Pennebaker</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Language use of depressed and depression-vulnerable college students</article-title>
          <source>Cognit Emotion</source>
          <year>2004</year>
          <month>12</month>
          <volume>18</volume>
          <issue>8</issue>
          <fpage>1121</fpage>
          <lpage>33</lpage>
          <pub-id pub-id-type="doi">10.1080/02699930441000030</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref58">
        <label>58</label>
        <nlm-citation citation-type="web">
          <article-title>Voorspelt het gebruik van (positieve) emotiewoorden in e-mails een afname in depressieve klachten? : een analyse van e-mails van deelnemers die een zelfhulpcursus met e-mailbegeleiding volgden</article-title>
          <source>University of Twente</source>
          <access-date>2022-02-02</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://essay.utwente.nl/59979/">http://essay.utwente.nl/59979/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref59">
        <label>59</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>De Choudhury</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Gamon</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Counts</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Horvitz</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Predicting depression via social media</article-title>
          <source>Proc Int AAAI Conference Web Soc Media</source>
          <year>2021</year>
          <volume>7</volume>
          <issue>1</issue>
          <fpage>128</fpage>
          <lpage>37</lpage>
        </nlm-citation>
      </ref>
      <ref id="ref60">
        <label>60</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ziemer</surname>
              <given-names>KS</given-names>
            </name>
            <name name-style="western">
              <surname>Korkmaz</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Using text to predict psychological and physical health: a comparison of human raters and computerized text analysis</article-title>
          <source>Comput Human Behav</source>
          <year>2017</year>
          <month>11</month>
          <volume>76</volume>
          <fpage>122</fpage>
          <lpage>7</lpage>
          <pub-id pub-id-type="doi">10.1016/j.chb.2017.06.038</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref61">
        <label>61</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Zheng</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Piscitello</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Zulueta</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Ajilore</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Ryan</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Leow</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>DeepMood: modeling mobile phone typing dynamics for mood detection</article-title>
          <source>Proceedings of the 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining</source>
          <year>2017</year>
          <conf-name>KDD '17: The 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining</conf-name>
          <conf-date>Aug 13 - 17, 2017</conf-date>
          <conf-loc>Halifax, NS, Canada</conf-loc>
          <pub-id pub-id-type="doi">10.1145/3097983.3098086</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref62">
        <label>62</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kołakowska</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Kulikowski</surname>
              <given-names>JL</given-names>
            </name>
            <name name-style="western">
              <surname>Mroczek</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Usefulness of keystroke dynamics features in user authentication and emotion recognition</article-title>
          <source>Human-Computer Systems Interaction</source>
          <year>2018</year>
          <publisher-loc>Cham, Switzerland</publisher-loc>
          <publisher-name>Springer International Publishing</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref63">
        <label>63</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lim</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Ayesh</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Stacey</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>The effects of typing demand on emotional stress, mouse and keystroke behaviours</article-title>
          <source>Intelligent Systems in Science and Information 2014</source>
          <year>2015</year>
          <publisher-loc>Cham, Switzerland</publisher-loc>
          <publisher-name>Springer International Publishing</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref64">
        <label>64</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sağbaş</surname>
              <given-names>EA</given-names>
            </name>
            <name name-style="western">
              <surname>Korukoglu</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Balli</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Stress detection via keyboard typing behaviors by using smartphone sensors and machine learning techniques</article-title>
          <source>J Med Syst</source>
          <year>2020</year>
          <month>02</month>
          <day>17</day>
          <volume>44</volume>
          <issue>4</issue>
          <fpage>68</fpage>
          <pub-id pub-id-type="doi">10.1007/s10916-020-1530-z</pub-id>
          <pub-id pub-id-type="medline">32072331</pub-id>
          <pub-id pub-id-type="pii">10.1007/s10916-020-1530-z</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref65">
        <label>65</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Vizer</surname>
              <given-names>LM</given-names>
            </name>
            <name name-style="western">
              <surname>Zhou</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Sears</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Automated stress detection using keystroke and linguistic features: an exploratory study</article-title>
          <source>Int J Human Comput Stud</source>
          <year>2009</year>
          <month>10</month>
          <volume>67</volume>
          <issue>10</issue>
          <fpage>870</fpage>
          <lpage>86</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ijhcs.2009.07.005</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref66">
        <label>66</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ghosh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ganguly</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Mitra</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>De</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>TapSense: combining self-report patterns and typing characteristics for smartphone based emotion detection</article-title>
          <source>Proceedings of the 19th International Conference on Human-Computer Interaction with Mobile Devices and Services</source>
          <year>2017</year>
          <month>09</month>
          <day>04</day>
          <conf-name>MobileHCI '17: 19th International Conference on Human-Computer Interaction with Mobile Devices and Services</conf-name>
          <conf-date>Sep 4 - 7, 2017</conf-date>
          <conf-loc>Vienna, Austria</conf-loc>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1145/3098279.3098564"/>
          </comment>
          <pub-id pub-id-type="doi">10.1145/3098279.3098564</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref67">
        <label>67</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ghosh</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Hiware</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Ganguly</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Mitra</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>De</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Emotion detection from touch interactions during text entry on smartphones</article-title>
          <source>Int J Human Comput Stud</source>
          <year>2019</year>
          <month>10</month>
          <volume>130</volume>
          <fpage>47</fpage>
          <lpage>57</lpage>
          <pub-id pub-id-type="doi">10.1016/j.ijhcs.2019.04.005</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref68">
        <label>68</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Cao</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Leow</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>dpMood: exploiting local and periodic typing dynamics for personalized mood prediction</article-title>
          <source>Proceedings of the 2018 IEEE International Conference on Data Mining (ICDM)</source>
          <year>2018</year>
          <conf-name>2018 IEEE International Conference on Data Mining (ICDM)</conf-name>
          <conf-date>Nov 17-20, 2018</conf-date>
          <conf-loc>Singapore</conf-loc>
          <pub-id pub-id-type="doi">10.1109/icdm.2018.00031</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref69">
        <label>69</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Mastoras</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Iakovakis</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Hadjidimitriou</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Charisis</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Kassie</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Alsaadi</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Khandoker</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Hadjileontiadis</surname>
              <given-names>LJ</given-names>
            </name>
          </person-group>
          <article-title>Touchscreen typing pattern analysis for remote detection of the depressive tendency</article-title>
          <source>Sci Rep</source>
          <year>2019</year>
          <month>09</month>
          <day>16</day>
          <volume>9</volume>
          <issue>1</issue>
          <fpage>13414</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-019-50002-9"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-019-50002-9</pub-id>
          <pub-id pub-id-type="medline">31527640</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-019-50002-9</pub-id>
          <pub-id pub-id-type="pmcid">PMC6746713</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref70">
        <label>70</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Tsui</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Hsiao</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>The influence of emotion on keyboard typing: an experimental study using auditory stimuli</article-title>
          <source>PLoS One</source>
          <year>2015</year>
          <volume>10</volume>
          <issue>6</issue>
          <fpage>e0129056</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0129056"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0129056</pub-id>
          <pub-id pub-id-type="medline">26065902</pub-id>
          <pub-id pub-id-type="pii">PONE-D-14-22364</pub-id>
          <pub-id pub-id-type="pmcid">PMC4465979</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref71">
        <label>71</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Meers</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Dejonckheere</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Kalokerinos</surname>
              <given-names>EK</given-names>
            </name>
            <name name-style="western">
              <surname>Rummens</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Kuppens</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>mobileQ: a free user-friendly application for collecting experience sampling data</article-title>
          <source>Behav Res Methods</source>
          <year>2020</year>
          <month>08</month>
          <volume>52</volume>
          <issue>4</issue>
          <fpage>1510</fpage>
          <lpage>5</lpage>
          <pub-id pub-id-type="doi">10.3758/s13428-019-01330-1</pub-id>
          <pub-id pub-id-type="medline">31898294</pub-id>
          <pub-id pub-id-type="pii">10.3758/s13428-019-01330-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref72">
        <label>72</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>de Beurs</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Van Dyck</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Marquenie</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>Lange</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Blonk</surname>
              <given-names>RW</given-names>
            </name>
          </person-group>
          <article-title>De DASS: een vragenlijst voor het meten van depressie, angst en stress</article-title>
          <source>Gedragstherapie</source>
          <year>2001</year>
          <volume>34</volume>
          <issue>1</issue>
          <fpage>35</fpage>
          <lpage>53</lpage>
          <pub-id pub-id-type="doi">10.1037/t69646-000</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref73">
        <label>73</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Povey</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ghoshal</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Boulianne</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Burget</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Glembek</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Goel</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Hannemann</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Motlíček</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Qian</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Schwarz</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Silovsky</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Stemmer</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Vesely</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>The Kaldi Speech Recognition Toolkit</article-title>
          <source>Proceedings of the IEEE 2011 Workshop on Automatic Speech Recognition and Understanding</source>
          <year>2011</year>
          <conf-name>IEEE 2011 Workshop on Automatic Speech Recognition and Understanding</conf-name>
          <conf-date>Dec 11-15, 2011</conf-date>
          <conf-loc>Hilton Waikoloa Village, Big Island, Hawaii, US</conf-loc>
          <pub-id pub-id-type="doi">10.1109/icassp.2010.5495662</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref74">
        <label>74</label>
        <nlm-citation citation-type="web">
          <article-title>R: A language and environment for statistical computing</article-title>
          <source>R Foundation for Statistical Computing</source>
          <access-date>2022-02-02</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.R-project.org/">https://www.R-project.org/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref75">
        <label>75</label>
        <nlm-citation citation-type="web">
          <article-title>Linguistic Inquiry and Word Count</article-title>
          <source>LIWC</source>
          <access-date>2022-02-02</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://liwc.wpengine.com/">http://liwc.wpengine.com/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref76">
        <label>76</label>
        <nlm-citation citation-type="web">
          <article-title>An electronic translation of the LIWC dictionary into Dutch</article-title>
          <source>Creative Commons</source>
          <access-date>2022-02-02</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://elex.link/elex2017/wp-content/uploads/2017/09/paper43.pdf">https://elex.link/elex2017/wp-content/uploads/2017/09/paper43.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref77">
        <label>77</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Eyben</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Wöllmer</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Schuller</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Opensmile: the munich versatile and fast open-source audio feature extractor</article-title>
          <source>Proceedings of the international conference on Multimedia - MM '10</source>
          <year>2010</year>
          <conf-name>The international conference on Multimedia - MM '10</conf-name>
          <conf-date>Oct 25 - 29, 2010</conf-date>
          <conf-loc>Florence, Italy</conf-loc>
          <pub-id pub-id-type="doi">10.1145/1873951.1874246</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref78">
        <label>78</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Eyben</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Scherer</surname>
              <given-names>KR</given-names>
            </name>
            <name name-style="western">
              <surname>Schuller</surname>
              <given-names>BW</given-names>
            </name>
            <name name-style="western">
              <surname>Sundberg</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Andre</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Busso</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Devillers</surname>
              <given-names>LY</given-names>
            </name>
            <name name-style="western">
              <surname>Epps</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Laukka</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Narayanan</surname>
              <given-names>SS</given-names>
            </name>
            <name name-style="western">
              <surname>Truong</surname>
              <given-names>KP</given-names>
            </name>
          </person-group>
          <article-title>The Geneva Minimalistic Acoustic Parameter Set (GeMAPS) for voice research and affective computing</article-title>
          <source>IEEE Trans Affective Comput</source>
          <year>2016</year>
          <month>4</month>
          <day>1</day>
          <volume>7</volume>
          <issue>2</issue>
          <fpage>190</fpage>
          <lpage>202</lpage>
          <pub-id pub-id-type="doi">10.1109/taffc.2015.2457417</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref79">
        <label>79</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Holm</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>A simple sequentially rejective multiple test procedure</article-title>
          <source>Scandinavian J Stat</source>
          <year>1979</year>
          <volume>6</volume>
          <issue>2</issue>
          <fpage>65</fpage>
          <lpage>70</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jstor.org/stable/4615733"/>
          </comment>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
