<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMH</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Ment Health</journal-id>
      <journal-title>JMIR Mental Health</journal-title>
      <issn pub-type="epub">2368-7959</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v9i7e36828</article-id>
      <article-id pub-id-type="pmid">35802401</article-id>
      <article-id pub-id-type="doi">10.2196/36828</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Acoustic and Linguistic Features of Impromptu Speech and Their Association With Anxiety: Validation Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Torous</surname>
            <given-names>John</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>McGinnis</surname>
            <given-names>Ellen</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>McGinnis</surname>
            <given-names>Ryan</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Yadav</surname>
            <given-names>Vijay</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Teferra</surname>
            <given-names>Bazen Gashaw</given-names>
          </name>
          <degrees>BSc, MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>The Edward S Rogers Sr Department of Electrical and Computer Engineering</institution>
            <institution>University of Toronto</institution>
            <addr-line>10 King’s College Road</addr-line>
            <addr-line>Toronto, ON, M5S 3G4</addr-line>
            <country>Canada</country>
            <phone>1 416 978 6992</phone>
            <email>bazen.teferra@mail.utoronto.ca</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-5325-9639</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Borwein</surname>
            <given-names>Sophie</given-names>
          </name>
          <degrees>BA, MPP, PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-6698-6648</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>DeSouza</surname>
            <given-names>Danielle D</given-names>
          </name>
          <degrees>BSc, MSc, PhD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-6861-5691</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Simpson</surname>
            <given-names>William</given-names>
          </name>
          <degrees>BSc, PhD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-1671-5660</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Rheault</surname>
            <given-names>Ludovic</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff5" ref-type="aff">5</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-9599-0427</ext-link>
        </contrib>
        <contrib id="contrib6" contrib-type="author">
          <name name-style="western">
            <surname>Rose</surname>
            <given-names>Jonathan</given-names>
          </name>
          <degrees>BASc, MASc, PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-3551-2175</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>The Edward S Rogers Sr Department of Electrical and Computer Engineering</institution>
        <institution>University of Toronto</institution>
        <addr-line>Toronto, ON</addr-line>
        <country>Canada</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>School of Public Policy</institution>
        <institution>Simon Fraser University</institution>
        <addr-line>Vancouver, BC</addr-line>
        <country>Canada</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Winterlight Labs</institution>
        <addr-line>Toronto, ON</addr-line>
        <country>Canada</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Department of Psychiatry and Behavioural Neurosciences</institution>
        <institution>McMaster University</institution>
        <addr-line>Hamilton, ON</addr-line>
        <country>Canada</country>
      </aff>
      <aff id="aff5">
        <label>5</label>
        <institution>Department of Political Science</institution>
        <institution>Munk School of Global Affairs and Public Policy</institution>
        <institution>University of Toronto</institution>
        <addr-line>Toronto, ON</addr-line>
        <country>Canada</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Bazen Gashaw Teferra <email>bazen.teferra@mail.utoronto.ca</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <month>7</month>
        <year>2022</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>8</day>
        <month>7</month>
        <year>2022</year>
      </pub-date>
      <volume>9</volume>
      <issue>7</issue>
      <elocation-id>e36828</elocation-id>
      <history>
        <date date-type="received">
          <day>28</day>
          <month>1</month>
          <year>2022</year>
        </date>
        <date date-type="rev-request">
          <day>25</day>
          <month>3</month>
          <year>2022</year>
        </date>
        <date date-type="rev-recd">
          <day>27</day>
          <month>4</month>
          <year>2022</year>
        </date>
        <date date-type="accepted">
          <day>23</day>
          <month>5</month>
          <year>2022</year>
        </date>
      </history>
      <copyright-statement>©Bazen Gashaw Teferra, Sophie Borwein, Danielle D DeSouza, William Simpson, Ludovic Rheault, Jonathan Rose. Originally published in JMIR Mental Health (https://mental.jmir.org), 08.07.2022.</copyright-statement>
      <copyright-year>2022</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Mental Health, is properly cited. The complete bibliographic information, a link to the original publication on https://mental.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://mental.jmir.org/2022/7/e36828" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>The measurement and monitoring of generalized anxiety disorder requires frequent interaction with psychiatrists or psychologists. Access to mental health professionals is often difficult because of high costs or insufficient availability. The ability to assess generalized anxiety disorder passively and at frequent intervals could be a useful complement to conventional treatment and help with relapse monitoring. Prior work suggests that higher anxiety levels are associated with features of human speech. As such, monitoring speech using personal smartphones or other wearable devices may be a means to achieve passive anxiety monitoring.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>This study aims to validate the association of previously suggested acoustic and linguistic features of speech with anxiety severity.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>A large number of participants (n=2000) were recruited and participated in a single web-based study session. Participants completed the Generalized Anxiety Disorder 7-item scale assessment and provided an impromptu speech sample in response to a modified version of the Trier Social Stress Test. Acoustic and linguistic speech features were a priori selected based on the existing speech and anxiety literature, along with related features. Associations between speech features and anxiety levels were assessed using age and personal income as covariates.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>Word count and speaking duration were negatively correlated with anxiety scores (<italic>r</italic>=–0.12; <italic>P</italic>&#60;.001), indicating that participants with higher anxiety scores spoke less. Several acoustic features were also significantly (<italic>P</italic>&#60;.05) associated with anxiety, including the mel-frequency cepstral coefficients, linear prediction cepstral coefficients, shimmer, fundamental frequency, and first formant. In contrast to previous literature, second and third formant, jitter, and zero crossing rate for the <italic>z</italic> score of the power spectral density acoustic features were not significantly associated with anxiety. Linguistic features, including negative-emotion words, were also associated with anxiety (<italic>r</italic>=0.10; <italic>P</italic>&#60;.001). In addition, some linguistic relationships were sex dependent. For example, the count of words related to power was positively associated with anxiety in women (<italic>r</italic>=0.07; <italic>P</italic>=.03), whereas it was negatively associated with anxiety in men (<italic>r</italic>=–0.09; <italic>P</italic>=.01).</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>Both acoustic and linguistic speech measures are associated with anxiety scores. The amount of speech, acoustic quality of speech, and gender-specific linguistic characteristics of speech may be useful as part of a system to screen for anxiety, detect relapse, or monitor treatment.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>mental health</kwd>
        <kwd>generalized anxiety disorder</kwd>
        <kwd>impromptu speech</kwd>
        <kwd>acoustic features</kwd>
        <kwd>linguistic features</kwd>
        <kwd>mobile phone</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Background</title>
        <p>Anxiety disorders are among the most common mental health issues, with an incidence of approximately 10% in the Canadian population [<xref ref-type="bibr" rid="ref1">1</xref>]. Many Canadians are unable to access psychological and psychiatric resources to help those affected [<xref ref-type="bibr" rid="ref2">2</xref>], in part, because of the cost of professional help [<xref ref-type="bibr" rid="ref3">3</xref>]. It may be possible to address some of this deficit using methods that automate the measurement and diagnosis of anxiety disorders. The first step in this direction is to explore methods for the automatic detection of mental health issues that could be used to trigger early intervention, monitor treatment response, or detect relapse. In addition, frequent monitoring together with other time-series information could be used to help understand the mechanisms of generalized anxiety disorder (GAD) itself. An avenue of such automation is recording an individual’s speech and looking for signals of anxiety within the recordings.</p>
        <p>In this work, we focused specifically on GAD [<xref ref-type="bibr" rid="ref4">4</xref>]. A reason that GAD may be detectable in speech is that those with anxiety disorders exhibit higher activation of the sympathetic nervous system under stress than those without anxiety [<xref ref-type="bibr" rid="ref5">5</xref>], which in turn influences the production of speech [<xref ref-type="bibr" rid="ref6">6</xref>]. The goal of this work was to collect a large set of samples of audio speech, each with a self-reported measure on an anxiety scale, and explore whether acoustic and linguistic signals correlated with measured anxiety. We built on previous studies by collecting approximately 10 times as many human participants as previous research on the detection of anxiety in speech. Many of the signals that we explored have been previously reported as significantly correlated with anxiety in the literature, and our goal was to leverage our larger sample size to examine which signals could be most useful in identifying anxiety in speech. We also explored linguistic indicators of anxiety that have not been considered before.</p>
        <p>This paper is organized as follows: the next section summarizes related work in anxiety detection. The <italic>Methods</italic> section describes the speech sample collection methods and the set of features considered for correlation with anxiety. The <italic>Results</italic> section reports on the demographics of participants and feature correlations, whereas the <italic>Discussion</italic> section discusses the results and their implications for future research on anxiety detection. A final section provides our conclusions.</p>
      </sec>
      <sec>
        <title>Related Work</title>
        <p>Although it is important to note that some scholarship is skeptical that biomarkers correlate with emotions [<xref ref-type="bibr" rid="ref7">7</xref>], here we review existing work exploring associations between both acoustic and linguistic speech features and anxiety severity in healthy and clinical cohorts. It should be noted that these studies explore broader classes of anxiety disorders, including internalizing disorders, social phobia or social anxiety disorder (SAD), panic disorder, and agoraphobia, as well as GAD.</p>
        <p>McGinnis et al [<xref ref-type="bibr" rid="ref8">8</xref>] identified several acoustic characteristics of speech that can be used to detect anxiety disorders in children. Studying 71 participants aged 3 to 8 years, the researchers were able to detect internalizing disorders—a collective term for anxiety and depression—from speech. The authors extracted and selected several acoustic features from the speech produced in a 3-minute task based on the Trier Social Stress Test (TSST) for children [<xref ref-type="bibr" rid="ref9">9</xref>]. These features included zero crossing rate, mel-frequency cepstral coefficients (MFCCs) [<xref ref-type="bibr" rid="ref10">10</xref>], zero crossing rate for the <italic>z</italic> score of the power spectral density (ZCR-zPSD), dominant frequency, mean frequency, perceptual spectral centroid, spectral flatness, and the skew and kurtosis of the power spectral density. Using the Davies-Bouldin index–based feature selection [<xref ref-type="bibr" rid="ref11">11</xref>], the MFCC features and ZCR-zPSD had the highest Davies-Bouldin score. Several models were built to predict which children had an internalizing disorder (n=43 out of 71) or were healthy. Both logistic regression and support vector machine [<xref ref-type="bibr" rid="ref12">12</xref>] analysis achieved a classification accuracy of 80%.</p>
        <p>Özseven et al [<xref ref-type="bibr" rid="ref13">13</xref>] conducted a study of the speech of 43 adults aged 17 to 55 years. Of these 43 adults, 21 were clinically diagnosed with GAD, 2 were diagnosed with panic disorder, and 20 were healthy controls. The study explored 122 acoustic features derived from the participants’ speech to determine the correlation between these features and anxiety. Their results showed that 42 of the features (including MFCCs, linear prediction cepstral coefficients [LPCCs], fundamental frequency [F0], first formant [F1], second formant [F2], third formant [F3], jitter, and shimmer) showed a significant change between a neutral state and an anxious state in the participants with anxiety.</p>
        <p>Weeks et al [<xref ref-type="bibr" rid="ref14">14</xref>] found a relationship between anxiety and alterations in voice. Specifically, their study showed a link between vocal pitch (characterized by F0) and SAD. They collected impromptu speech samples from 46 undergraduate students, 25 with a diagnosis of SAD and 21 healthy controls. Participants also completed the Beck Anxiety Scale as a measure of self-reported anxiety severity [<xref ref-type="bibr" rid="ref15">15</xref>]. Their results indicated that mean F0 was positively correlated (<italic>r</italic>=0.72; <italic>P</italic>=.002) with anxiety severity across all male participants. However, the correlation for female participants was weaker (<italic>r</italic>=0.02; <italic>P</italic>=.92), indicating possible sex differences in the relationship between anxiety severity and vocal pitch.</p>
        <p>Laukka et al [<xref ref-type="bibr" rid="ref16">16</xref>] explored the relationship between anxiety and the acoustic features of speech. They collected speech data from 71 patients with social phobia delivering public speeches and extracted 4 types of speech features: pitch (F0 mean, F0 SD, and F0 maximum), loudness (intensity mean), voice quality (HF 500, relative proportion of high-frequency spectral energy above vs below 500 Hz), and temporal aspects of speech (articulation rate and percentage of silence). The researchers observed a significant change from before treatment to after treatment (a pharmacological anxiolytic treatment for social anxiety) in F0 mean, F0 maximum, HF 500, and percentage of silence. They also calculated the Pearson correlation coefficient between state anxiety measured by the Spielberger State-Trait Anxiety Inventory [<xref ref-type="bibr" rid="ref17">17</xref>] and the speech features. Those with a significant correlation were F0 SD (<italic>r</italic>=–0.24; <italic>P</italic>&#60;.05) and percentage of silence (<italic>r</italic>=0.36; <italic>P</italic>&#60;.01).</p>
        <p>Albuquerque et al [<xref ref-type="bibr" rid="ref18">18</xref>] investigated the relationship between acoustic speech features and anxiety. They recruited 112 adult Portuguese speakers who performed 2 tasks: reading vowels in disyllabic words and picture description. The authors extracted 18 acoustic features, including F0, F1, F2, speech duration, number of pauses, and articulation rate. They measured the percentage change between participants who were nonanxious (Hospital Anxiety and Depression Scale, Anxiety subscale [<xref ref-type="bibr" rid="ref19">19</xref>] score ≤7) and those who were anxious (Hospital Anxiety and Depression Scale, Anxiety subscale score &#62;7) and observed a change of &#62;10% in speech duration.</p>
        <p>Wörtwein et al [<xref ref-type="bibr" rid="ref20">20</xref>] assessed the behaviors of participants experiencing anxiety caused by public speaking through audiovisual features. A total of 45 participants were recruited from Craigslist. These participants were asked to complete the Personal Report of Confidence as a Speaker scale [<xref ref-type="bibr" rid="ref21">21</xref>], which estimates public speaking anxiety levels. Several audio features were extracted from the audio and their results showed significant relationships between the Personal Report of Confidence as a Speaker scale and SD of the 0th coefficient of the MFCC [<xref ref-type="bibr" rid="ref10">10</xref>] (<italic>r</italic>=–0.36; <italic>P</italic>&#60;.05), SD of F1 (<italic>r</italic>=–0.41; <italic>P</italic>&#60;.01), and the total pause duration (<italic>r</italic>=0.35; <italic>P</italic>&#60;.05).</p>
        <p>Hagenaars and van Minnen [<xref ref-type="bibr" rid="ref22">22</xref>] explored whether the activation of fear was manifested in the speech of 25 female patients diagnosed with panic disorder. Their results showed that patients with panic disorder have a significantly higher pitch (<italic>P</italic>&#60;.001) during autobiographical fear memory. Respondents also spoke significantly more slowly (<italic>P</italic>&#60;.001) during autobiographical talking than during script talking.</p>
        <p>Di Matteo et al [<xref ref-type="bibr" rid="ref23">23</xref>] explored the relationship between linguistic features of speech and anxiety. Their work used <italic>passively</italic> collected intermittent samples of audio data from participants’ smartphones, collected over a 2-week period, as input. The study had 84 nonclinical participants recruited from a web-based recruitment platform. The audio was converted to text, and the authors used the Linguistic Inquiry and Word Count (LIWC) approach [<xref ref-type="bibr" rid="ref24">24</xref>] to classify the words into 67 different categories. They calculated correlations with 4 self-report measures: SAD, GAD, depression, and functional impairment. They observed a significant correlation between words related to perceptual processes (eg, <italic>see</italic> in the LIWC) with SAD (<italic>r</italic>=0.31; <italic>P</italic>=.003) and words related to rewards with GAD (<italic>r</italic>=–0.29; <italic>P</italic>=.007).</p>
        <p>In a similar study that used LIWC features, Anderson et al [<xref ref-type="bibr" rid="ref25">25</xref>] recruited 42 participants diagnosed with SAD and 27 healthy controls to explore the differences in the words used between these 2 groups. The participants were asked to write a distinct autobiographical and socially painful passage. They used the LIWC to extract the word count in each of the LIWC categories, such as first-person singular pronouns, anxiety-related words, and fear-related words. Their results showed that patients with SAD used more first-person singular pronouns (I, me, and mine), anxiety-related words, sensory and perceptual words, and words denoting physical touch, as well as fewer references to other people.</p>
        <p>Overall, previous work identifies several audio features that are correlated with anxiety. However, the results are mixed because of differences in participants recruited, speech measures assessed, statistical methods used, and amount of mood induction. In addition, the largest sample size among these studies was 112, which limits the potential for generalizability to the larger population, a necessary step before considering the deployment of technologies for passive anxiety monitoring. In this study, we recruited a substantially larger cohort (n=2000) to explore features of speech from previous findings at a greater scale.</p>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Data Collection</title>
        <p>Participants from a nonclinical population were recruited for a 10- to 15-minute task implemented through a custom website. Self-report measures of anxiety were collected once at the beginning of the study and at the end of each of 2 specific tasks. In the following subsections, we describe the recruitment of participants, the data collection procedure, and the assessment of anxiety and speech measures.</p>
      </sec>
      <sec>
        <title>Ethics Approval</title>
        <p>The study was approved by the University of Toronto Research Ethics Board (37584).</p>
      </sec>
      <sec>
        <title>Recruitment and Demographics</title>
        <p>A total of 2000 participants were recruited using the Prolific [<xref ref-type="bibr" rid="ref26">26</xref>] web-based human participant recruitment platform. Prolific maintains a list of registered participants and, for each participant, many characteristics, including age, income, sex, primary language spoken, country of birth, and residence. The inclusion criteria for this study were as follows: age range 18 to 65 years; fluency in English; English as a first language; and at least 10 previous studies completed on Prolific, with 95% of these previous Prolific tasks completed satisfactorily, as labeled by the study author. The data set was also balanced for sex (1000/2000, 50% female, and 1000/2000, 50% male). The Prolific platform provides us with some relevant demographics of the participants, including their age and income.</p>
        <p>Participants who completed the study were paid £2 (US $2.74). They were able to complete the entire study remotely, using their PCs.</p>
      </sec>
      <sec>
        <title>Study Procedure</title>
        <p>Participants were presented with the opportunity to participate in this study on Prolific if they met the aforementioned inclusion criteria. Those who wished to participate clicked on the study link, which brought them to a consent form that described the procedure and goals of the study and also provided information on data privacy. After they gave consent, a hyperlink brought participants to an external web application (a screenshot of which is presented in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>) that implemented the tasks described in the following sections.</p>
        <p>Participants were first asked to fill out the standard Generalized Anxiety Disorder 7-item scale (GAD-7) questionnaire [<xref ref-type="bibr" rid="ref27">27</xref>], which is described in more detail in the <italic>Anxiety Measures</italic> section. Next, they were asked to complete 2 speech tasks, which were recorded using their computer’s internal microphone. It should be noted that our protocol also involved recording a video of the participants’ faces during both speech tasks. Although that video is not used in the work reported here, the fact that the video was requested may have influenced the set of participants willing to continue participation, as discussed later in this paper.</p>
        <p>For the first speech task (task 1), participants were asked to read aloud a specific passage titled <italic>My Grandfather</italic>, which is a public domain passage that contains nearly all the phonemes of American English [<xref ref-type="bibr" rid="ref28">28</xref>]. The full script of this passage is presented in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>. This passage is not intended to induce stress or anxiety but to provide a baseline speech sample for each participant. It was used in this work to test the quality of the speech-to-text (STT) transcription.</p>
        <p>For the second speech task (task 2), the participant followed a modified version of the widely used TSST [<xref ref-type="bibr" rid="ref29">29</xref>] for the purpose of inducing a moderate amount of stress. We chose to base our anxiety stimulus on the TSST as previous studies [<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref31">31</xref>] have shown a higher activation in participants with relatively higher anxiety after exposure to moderate stress induced by the TSST.</p>
        <p>In this modified version of the TSST, participants were told to imagine that they were a job applicant for a job that they really want (their <italic>dream</italic> job) and they were invited for an interview with a hiring manager. They were given a few minutes to prepare—to decide what their <italic>dream</italic> job is—and how they would convince an interviewer that they are the right person for the position. Participants were also told that the recorded video would be viewed by researchers studying their behavior and language. Participants were then asked to speak for 5 minutes, making the case for themselves to be hired for that dream job.</p>
        <p>It should be noted that in the original TSST [<xref ref-type="bibr" rid="ref29">29</xref>], participants would normally deliver their speech in front of a live panel of judges. If a participant finished their delivery in &#60;5 minutes, the judges in the original TSST design would encourage the participant to keep speaking for the full 5 minutes. An example statement of encouragement is as follows: “What are your personal strengths?” In our modified TSST, we implemented a similar method to encourage participants to speak for the full 5 minutes. When our software detected silence (the absence of speech for &#62;6 seconds), it displayed several different prompts, which are reproduced in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>, inviting participants to keep speaking on different topics related to the task. Finally, it should be noted that the modified TSST only made use of the first part of the original TSST and not the second task involving mental arithmetic.</p>
      </sec>
      <sec>
        <title>Anxiety Measures</title>
        <p>Our goal was to examine possible correlations between features of speech and GAD, based largely on previously suggested features. To measure the severity of GAD, we used the GAD-7 [<xref ref-type="bibr" rid="ref27">27</xref>], which is a 7-item questionnaire that asks participants how often they were bothered by anxiety-related problems during the previous 2 weeks. Although the 2-week period suggests that the GAD-7 measures a temporary condition, this seems to be in contradiction with the fact that a GAD diagnosis requires a 6-month duration of symptoms [<xref ref-type="bibr" rid="ref32">32</xref>,<xref ref-type="bibr" rid="ref33">33</xref>]. However, the GAD-7 has been validated as a diagnostic tool for GAD (using a value of 10 as the cutoff threshold) with a sensitivity of 89% and a specificity of 82% [<xref ref-type="bibr" rid="ref27">27</xref>]. Thus, we chose to use the GAD-7 to obtain a binary label of GAD (using the same threshold of 10) as our main indicator of anxiety.</p>
        <p>Each of the 7 questions on the GAD-7 has 4 options for the participant to select from, indicating how often they have been bothered by the 7 problems on the scale. These options and their numerical ratings are as follows: 0=not at all, 1=several days, 2=more than half the days, and 3=nearly every day. The final GAD-7 score is a summation of the values for each question, giving a severity measure for GAD in the range of 0 (no anxiety symptoms) to 21 (severe anxiety symptoms).</p>
        <p>We also used a second, informal anxiety measure in this study to serve as an internal check to measure how much, on average, the modified TSST (task 2) induced stress and anxiety compared with task 1 (the reading or speaking of the <italic>My</italic> <italic>Grandfather</italic> passage). Here, we used a single question to measure self-reported levels of anxiety on a 4-point scale. We asked participants how anxious they felt during the task and to choose from the following numerical rating: 0=not anxious at all, 1=somewhat anxious, 2=very anxious, and 3=extremely anxious. This question was deployed immediately after the first and second tasks had been completed.</p>
      </sec>
      <sec>
        <title>Selection of Acoustic and Linguistic Features</title>
        <sec>
          <title>Overview</title>
          <p>Prior work suggested that information about the mental state of a person may be acquired from the signals within speech acoustics [<xref ref-type="bibr" rid="ref34">34</xref>] and the language used [<xref ref-type="bibr" rid="ref35">35</xref>]. We refer to each kind of this extracted information as a <italic>feature</italic> using the terminology used in the field of machine learning.</p>
          <p>In this work, we considered both acoustic and linguistic features, which are described in the following sections. These features were extracted from each of the 5-minute speech samples in which the participant responded to the modified TSST task. It should be noted that all the participants were prompted to speak for the full 5 minutes, as described in the <italic>Study Procedure</italic> section, although the total speech duration of each participant may vary.</p>
        </sec>
        <sec>
          <title>Acoustic Features</title>
          <sec>
            <title>Overview</title>
            <p>Previous research has identified several acoustic features that are correlated with anxiety, as described in the <italic>Related Work</italic> section. Using these previous findings as a reference point, we selected the acoustic features described in the following sections for our empirical analysis. The features were extracted using the following software packages: My-Voice Analysis [<xref ref-type="bibr" rid="ref36">36</xref>], Surfboard [<xref ref-type="bibr" rid="ref37">37</xref>], and Librosa [<xref ref-type="bibr" rid="ref38">38</xref>].</p>
          </sec>
          <sec>
            <title>MFCC Features</title>
            <p>These are coefficients derived from a mel-scale cepstral representation of an audio signal. We included 13 MFCCs, a common set of acoustic signals designed to reflect changes in perceivable pitch. The MFCC features were shown to be related to anxiety in 3 studies [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref20">20</xref>]. Descriptive statistics (mean and SD) of the 13 MFCC features were used in this study. It should be noted that not all MFCC features included in this study were determined to be significant in prior work; however, these 13 are most commonly assessed together, and thus, we included them all as features of interest. The parameters we used when extracting these 13 MFCC features were as follows: window length=2048 samples, length of fast Fourier transform window=2048 samples, samples advance between successive frames=512 samples, window type=Hanning, and number of mel bands=128.</p>
          </sec>
          <sec>
            <title>LPCC Features</title>
            <p>These are coefficients derived from a linear prediction cepstral representation of an audio signal. The first 13 cepstrum coefficients were used here. The LPCC features were shown to be related with anxiety in the study by Özseven et al [<xref ref-type="bibr" rid="ref13">13</xref>]. Descriptive statistics (mean and SD) of the 13 LPCCs were used in our study.</p>
          </sec>
          <sec>
            <title>ZCR-zPSD Features</title>
            <p>In the study by McGinnis et al [<xref ref-type="bibr" rid="ref8">8</xref>], ZCR-zPSD was one of the top features selected using Davies-Bouldin index–based feature selection [<xref ref-type="bibr" rid="ref11">11</xref>] for an anxiety-prediction task.</p>
          </sec>
          <sec>
            <title>Amount of Speech</title>
            <p>This refers to the amount of speech and related metrics such as the percentage of silence. These features have been shown to be related to anxiety in 3 studies [<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref20">20</xref>]. Our specific feature was the amount of time, in seconds, that speech was present. We also counted the total number of words present in an STT transcript as a separate measure of the amount of speech.</p>
          </sec>
          <sec>
            <title>Articulation Rate</title>
            <p>This indicates how fast the participant spoke. The study by Hagenaars and van Minnen [<xref ref-type="bibr" rid="ref22">22</xref>] suggested that patients with panic disorder spoke significantly slower (<italic>P</italic>&#60;.001) during autobiographical talking than when reading a script.</p>
          </sec>
          <sec>
            <title>F0 Feature</title>
            <p>This is the frequency at which the glottis vibrates, also known as the <italic>pitch</italic> of the voice. Multiple studies have shown F0 to be one of the acoustic features affected by anxiety [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref22">22</xref>]. F0 varies throughout a person’s speech; therefore, both the mean and SD of F0 were used as features.</p>
          </sec>
          <sec>
            <title>F1, F2, and F3 Features</title>
            <p>These are the F1, F2, and F3 [<xref ref-type="bibr" rid="ref39">39</xref>]. The study by Özseven et al [<xref ref-type="bibr" rid="ref13">13</xref>] showed a significant relation with anxiety. The mean and SD of each formant were used as features.</p>
          </sec>
          <sec>
            <title>Jitter</title>
            <p>This refers to the cycle-to-cycle F0 variation of the sound wave. <italic>Jitter</italic> has been shown to be an indicator of anxiety [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref40">40</xref>,<xref ref-type="bibr" rid="ref41">41</xref>].</p>
          </sec>
          <sec>
            <title>Shimmer</title>
            <p>This refers to the cycle-to-cycle amplitude variation of the sound wave. <italic>Shimmer</italic> has been shown to be related to anxiety severity [<xref ref-type="bibr" rid="ref13">13</xref>].</p>
          </sec>
          <sec>
            <title>Intensity</title>
            <p>The squared mean of the amplitude of the sound wave within a given frame, also known as <italic>intensity</italic>, has been shown to be related to anxiety [<xref ref-type="bibr" rid="ref16">16</xref>]. As the amplitude of a sound wave varies during speech, the mean and SD were used as features.</p>
          </sec>
        </sec>
        <sec>
          <title>Linguistic Features</title>
          <p>Using Amazon’s AWS STT [<xref ref-type="bibr" rid="ref42">42</xref>] program, a transcript was produced from the audio recordings. From the transcripts, linguistic features were extracted using the LIWC software (Pennebaker Conglomerates, Inc) [<xref ref-type="bibr" rid="ref24">24</xref>], which places words into dictionaries based on semantic categories. For example, 1 category is called <italic>negemo</italic> and contains words that relate to negative emotions, such as <italic>hurt</italic>, <italic>ugly</italic>, and <italic>nasty</italic>. Another category is called <italic>health</italic> and contains words such as <italic>clinic</italic>, <italic>flu</italic>, and <italic>pill</italic>. There is also a category called <italic>anxiety,</italic> which includes words such as <italic>anxiety</italic> and <italic>fearful</italic>. Some categories are contained within others; for example, <italic>anxiety</italic> is contained within <italic>negemo</italic>.</p>
          <p>To apply the LIWC dictionaries, one simply counts the number of words that belong to each category, and each count becomes a feature. There are 93 categories in the LIWC, although not all are relevant for an STT transcript. We removed those features that were not relevant; for example, informal language words such as <italic>lol</italic> and <italic>btw</italic>. Other excluded categories included those related to some punctuation marks (eg, colons, quotation marks, and parentheses). After removing these, 80 linguistic features remained. Prior work [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref25">25</xref>], which was discussed in the <italic>Related Work</italic> section, has shown that LIWC categories related to perceptual processes (see, hear, and feel), words related to rewards, the use of the first-person singular pronoun, and anxiety-related words were associated with anxiety.</p>
        </sec>
      </sec>
      <sec>
        <title>Separation of Data for Analysis</title>
        <p>The overarching objective of this study was to gain an understanding of which features of speech—both acoustic and linguistic—are correlated with the GAD-7. However, it is known that certain demographic attributes are directly indicative of anxiety. For example, sex is known to influence the prevalence of anxiety [<xref ref-type="bibr" rid="ref43">43</xref>]. In addition, both age [<xref ref-type="bibr" rid="ref44">44</xref>] and income [<xref ref-type="bibr" rid="ref45">45</xref>] influence anxiety, which suggests the need to control for these demographics. An additional reason to control for the demographics is that both age and income have been shown to be related to speech features [<xref ref-type="bibr" rid="ref46">46</xref>,<xref ref-type="bibr" rid="ref47">47</xref>]. towing to the strong effect of sex on the GAD-7 score, we created separate data sets for analysis of female and male samples, in addition to the combined data set. We chose to do this, rather than correcting for sex computationally, because it leaves the data intact.</p>
      </sec>
      <sec>
        <title>Statistical Analysis</title>
        <p>The partial Pearson correlation coefficient [<xref ref-type="bibr" rid="ref48">48</xref>] was computed between each of the features and the GAD-7 (controlling for the effect of age and personal income). Correlations were examined for 3 versions of the data set: the entire sample data set and separately by sex for male and female participants. We considered a result statistically significant at a significance level of <italic>P</italic>=.05. The <italic>P</italic> values were not corrected to account for the large number of tests as we attempted to use features that were determined to be significant in previous works.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Overview</title>
        <p>This section reports the main empirical results. We begin by discussing the recruitment yield, the demographic characteristics of the participants, and the relationship between demographic attributes and the reported GAD-7 score. Next, we report correlations for the features described in the <italic>Selection of Acoustic and Linguistic Features</italic> section.</p>
      </sec>
      <sec>
        <title>Recruitment and Data Inclusion</title>
        <p>A total of 4542 participants accepted the offer from the Prolific recruitment platform to participate in the study, of whom 2212 (48.7%) completed the study, giving a recruitment yield of approximately 49%.</p>
        <p>Of the 2212 participants who completed the study, 2000 (90.42%) provided acceptable submissions (and thus received payment), giving a submission-to-approval yield of approximately 90%. To be clear, the recruitment continued until 2000 acceptable submissions were received. The reasons for which submissions were deemed unacceptable included the following: a missing video, a missing or grossly imperfect audio, or failure to complete one or both tasks. These acceptability criteria were distinct from those used in the subsequent review of audio quality that is described in the following paragraphs. The period of recruitment ranged from November 23, 2020, to May 28, 2021. Of note, the recruitment took place during the global COVID-19 pandemic.</p>
        <p>In addition to the aforementioned submission approval criteria, we reviewed the input data and audio for acceptability using the following procedure. To begin, we computed all acoustic and linguistic features described in the <italic>Selection of Acoustic and Linguistic Features</italic> section. Recordings with poor quality were filtered out for manual review based on the following criteria:</p>
        <list list-type="order">
          <list-item>
            <p>A task 2 word count of &#60;125</p>
          </list-item>
          <list-item>
            <p>A speaking duration for task 2 of &#60;60 seconds (compared with the full 5 minutes)</p>
          </list-item>
          <list-item>
            <p>Any other feature value being beyond 3 SDs from the mean in either direction (outliers)</p>
          </list-item>
        </list>
        <p>Of the 2000 participant recordings, 193 (9.65%) were flagged based on these criteria. For each of these, a researcher (BGT) listened to the task 2 audio recordings. The researcher discarded any samples that were deemed, subjectively, to be of insufficient audio quality or those whose response to task 2 was not responsive to the task itself. Of the 193 flagged participants, 123 (63.7%) were rejected through this manual review, meaning that of the 2000 samples, 1877 (93.85%) remained.</p>
        <p>Finally, the 1877 samples were checked for missing data, and 133 (7.09%) participants had missing demographic information; consequently, the final number of participants included in our analysis was 1744 (92.91%). The flow chart of the study recruitment and quality control is presented in <xref rid="figure1" ref-type="fig">Figure 1</xref>. We also explored correlations of the excluded data with the GAD-7, often called missingness analysis, and this is presented in <xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Study recruitment flow chart.</p>
          </caption>
          <graphic xlink:href="mental_v9i7e36828_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Data Overview and Demographics of Participants</title>
        <p>Of the 1744 participants, 540 (30.96%) were above the GAD-7 screening threshold of 10 and 1204 (69.04%) were below the GAD-7 screening threshold of 10. Hereon, we will refer to those participants with a GAD-7 score ≥10 as the group <italic>with anxiety</italic> and those with a GAD-7 score &#60;10 as the <italic>nonanxious</italic> group.</p>
        <p><xref ref-type="table" rid="table1">Table 1</xref> shows participants’ demographics, obtained from the Prolific recruitment platform. Columns 1 and 2 of the table show the name of demographic attributes and each category, whereas columns 3 and 4 give the number (and percentage) of participants with that attribute in the group with anxiety and the nonanxious group, respectively. Column 5 gives the <italic>P</italic> value for a chi-square test of the null of independence to determine whether there is a significant difference between the group with anxiety and the nonanxious group for each categorical factor.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Demographic characteristics of participants in the group with anxiety and the nonanxious group (N=1744).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="210"/>
            <col width="280"/>
            <col width="280"/>
            <col width="0"/>
            <col width="200"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Demographic factors</td>
                <td>Group with anxiety (n=540), n (%)</td>
                <td>Nonanxious group (n=1204), n (%)</td>
                <td colspan="2"><italic>P</italic> value from chi-square test</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="5">
                  <bold>Sex</bold>
                </td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Male</td>
                <td>229 (42.41)</td>
                <td>653 (54.24)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Female</td>
                <td>311 (57.59)</td>
                <td>551 (45.76)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="5">
                  <bold>Self-reported ongoing mental health illness or condition</bold>
                </td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Yes</td>
                <td>297 (55)</td>
                <td>311 (25.83)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>No</td>
                <td>243 (45)</td>
                <td>893 (74.17)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="5">
                  <bold>Personal income, pounds sterling (£1=US $1.37)</bold>
                </td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>&#60;10,000</td>
                <td>181 (33.52)</td>
                <td>281 (23.34)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>10,000 to 19,999</td>
                <td>112 (20.74)</td>
                <td>208 (17.28)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>20,000 to 29,999</td>
                <td>92 (17.04)</td>
                <td>259 (21.51)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>30,000 to 39,999</td>
                <td>60 (11.11)</td>
                <td>184 (15.28)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>40,000 to 49,999</td>
                <td>36 (6.67)</td>
                <td>109 (9.05)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>50,000 to 59,999</td>
                <td>20 (3.7)</td>
                <td>74 (6.15)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>≥60,000</td>
                <td>39 (7.22)</td>
                <td>89 (7.39)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td colspan="5">
                  <bold>Age (years)</bold>
                </td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>18 to 19</td>
                <td>27 (5)</td>
                <td>44 (3.65)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>20 to 29</td>
                <td>239 (44.26)</td>
                <td>379 (31.48)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>30 to 39</td>
                <td>162 (30)</td>
                <td>334 (27.74)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>40 to 49</td>
                <td>67 (12.41)</td>
                <td>219 (18.19)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>50 to 59</td>
                <td>39 (7.22)</td>
                <td>132 (10.96)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>≥60</td>
                <td>6 (1.11)</td>
                <td>96 (7.97)</td>
                <td colspan="2">
                  <break/>
                </td>
              </tr>
            </tbody>
          </table>
        </table-wrap>
      </sec>
      <sec>
        <title>Posttask Self-report Anxiety Measure</title>
        <p>As described in the <italic>Anxiety Measures</italic> section, participants were asked to rate their state of anxiety after each task on a scale of 0 to 3, where 3 was the highest level of anxiety. A paired 2-tailed <italic>t</italic> test was conducted to assess the difference between the 2 measurements. The test validates that the modified TSST task successfully induced some anxiety in participants, with the average score on the self-reported state anxiety measure increasing from 0.5 (SD 0.6) to 1.5 (SD 0.9; <italic>P</italic>&#60;.001) before and after completing task 2, respectively.</p>
      </sec>
      <sec>
        <title>Feature Correlations</title>
        <sec>
          <title>Overview</title>
          <p>The <italic>Selection of Acoustic and Linguistic Features</italic> section describes the set of acoustic and linguistic features that were selected. These were features that were reported as significant in prior work on anxiety and speech, as well as closely associated features. These features were computed on the speech samples of participants performing task 2—the modified TSST. The following subsections summarize the main empirical results. The correlation between demographics and the acoustic and linguistic features is presented in <xref ref-type="supplementary-material" rid="app5">Multimedia Appendix 5</xref>, and the intercorrelation among the significant features is presented in <xref ref-type="supplementary-material" rid="app6">Multimedia Appendix 6</xref>, <xref ref-type="supplementary-material" rid="app7">Multimedia Appendix 7</xref>, and <xref ref-type="supplementary-material" rid="app8">Multimedia Appendix 8</xref> for the all-sample, female-sample, and male-sample data sets, respectively.</p>
        </sec>
        <sec>
          <title>Amount of Speech</title>
          <p>The features with one of the highest correlations for both the male-sample and female-sample data sets were those related to the amount the participant spoke during task 2. The 2 specific features used to estimate speech length were speaking duration (the number of seconds of speech present within the 5-minute speech task) and the word count derived from an STT transcript. <xref ref-type="table" rid="table2">Table 2</xref> presents the correlation for the all-sample data set (controlling for sex, age, and income) and for separated female-sample and male-sample data sets (controlling for age and income). <xref rid="figure2" ref-type="fig">Figure 2</xref> presents a scatter plot of speaking duration versus the GAD-7, as well as the distribution of both variables, for all 3 data sets. The scatter plot is colored to give a better sense of the density of data points. <xref rid="figure3" ref-type="fig">Figure 3</xref> provides the same kind of scatter plots and distributions for the word count metric of task 2.</p>
          <table-wrap position="float" id="table2">
            <label>Table 2</label>
            <caption>
              <p>Correlation of amount of speech features with the Generalized Anxiety Disorder 7-item scale.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="30"/>
              <col width="570"/>
              <col width="200"/>
              <col width="200"/>
              <thead>
                <tr valign="top">
                  <td colspan="2">Sample and feature</td>
                  <td>
                    <italic>r</italic>
                  </td>
                  <td><italic>P</italic> value</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td colspan="4">
                    <bold>All samples (N=1744)</bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Speaking duration</td>
                  <td>–0.12</td>
                  <td>&#60;.001</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Word count</td>
                  <td>–0.12</td>
                  <td>&#60;.001</td>
                </tr>
                <tr valign="top">
                  <td colspan="4">
                    <bold>Female samples (n=862)</bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Word count</td>
                  <td>–0.13</td>
                  <td>&#60;.001</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Speaking duration</td>
                  <td>–0.11</td>
                  <td>&#60;.001</td>
                </tr>
                <tr valign="top">
                  <td colspan="4">
                    <bold>Male samples (n=882)</bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Speaking duration</td>
                  <td>–0.13</td>
                  <td>&#60;.001</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Word count</td>
                  <td>–0.12</td>
                  <td>&#60;.001</td>
                </tr>
              </tbody>
            </table>
          </table-wrap>
          <fig id="figure2" position="float">
            <label>Figure 2</label>
            <caption>
              <p>Speaking duration versus Generalized Anxiety Disorder 7-item scale (GAD-7) scatter plot and distributions.</p>
            </caption>
            <graphic xlink:href="mental_v9i7e36828_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <fig id="figure3" position="float">
            <label>Figure 3</label>
            <caption>
              <p>Word count (WC) versus Generalized Anxiety Disorder 7-item scale (GAD-7) scatter plot and distributions.</p>
            </caption>
            <graphic xlink:href="mental_v9i7e36828_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
        <sec>
          <title>Acoustic Feature Correlation With the GAD-7</title>
          <p><xref ref-type="table" rid="table3">Table 3</xref> presents the correlation and <italic>P</italic> values for all the acoustic features (presented in the <italic>Acoustic Features</italic> section) that had <italic>P</italic> values above the 95% CI for the 3 data sets: all participants, female-only participants, and male-only participants. Again, it should be noted that all correlations were computed after controlling for age and personal income, whereas the calculations involving all participants also controlled for sex.</p>
          <p><xref ref-type="table" rid="table4">Table 4</xref> reports results for features that previous work found to be statistically significant but for which we found no correlation in our sample. In our results, these features were not significantly associated with anxiety in any of the 3 data sets: all participants, female-only participants, and male-only participants.</p>
          <p><xref ref-type="table" rid="table5">Table 5</xref> makes a direct comparison between previous work on the specific features (and their relation to anxiety) and the results from this study.</p>
          <table-wrap position="float" id="table3">
            <label>Table 3</label>
            <caption>
              <p>Correlation of significant acoustic features with the Generalized Anxiety Disorder 7-item scale.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="30"/>
              <col width="470"/>
              <col width="0"/>
              <col width="250"/>
              <col width="0"/>
              <col width="250"/>
              <thead>
                <tr valign="top">
                  <td colspan="3">Sample and feature</td>
                  <td colspan="2">
                    <italic>r</italic>
                  </td>
                  <td><italic>P</italic> value</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td colspan="6">
                    <bold>All samples (N=1744)</bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Shimmer</td>
                  <td colspan="2">0.08</td>
                  <td colspan="2">&#60;.001</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>mfcc_std_2</td>
                  <td colspan="2">–0.08</td>
                  <td colspan="2">.002</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>mfcc_std_3</td>
                  <td colspan="2">–0.07</td>
                  <td colspan="2">.002</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>mfcc_mean_2</td>
                  <td colspan="2">–0.07</td>
                  <td colspan="2">.004</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>f0_std</td>
                  <td colspan="2">0.06</td>
                  <td colspan="2">.01</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>mfcc_std_5</td>
                  <td colspan="2">–0.06</td>
                  <td colspan="2">.01</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>mfcc_std_4</td>
                  <td colspan="2">–0.05</td>
                  <td colspan="2">.03</td>
                </tr>
                <tr valign="top">
                  <td colspan="6">
                    <bold>Female samples (n=862)</bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>mfcc_std_3</td>
                  <td colspan="2">–0.10</td>
                  <td colspan="2">.002</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Shimmer</td>
                  <td colspan="2">0.10</td>
                  <td colspan="2">.004</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>lpcc_std_6</td>
                  <td colspan="2">–0.09</td>
                  <td colspan="2">.008</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>lpcc_std_4</td>
                  <td colspan="2">–0.09</td>
                  <td colspan="2">.008</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>mfcc_mean_2</td>
                  <td colspan="2">–0.09</td>
                  <td colspan="2">.01</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>intensity_mean</td>
                  <td colspan="2">–0.09</td>
                  <td colspan="2">.01</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>mfcc_mean_1</td>
                  <td colspan="2">–0.09</td>
                  <td colspan="2">.01</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>lpcc_std_10</td>
                  <td colspan="2">–0.07</td>
                  <td colspan="2">.03</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>intensity_std</td>
                  <td colspan="2">–0.07</td>
                  <td colspan="2">.03</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>lpcc_std_12</td>
                  <td colspan="2">–0.07</td>
                  <td colspan="2">.04</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>mfcc_mean_8</td>
                  <td colspan="2">0.07</td>
                  <td colspan="2">.04</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>lpcc_mean_4</td>
                  <td colspan="2">0.07</td>
                  <td colspan="2">.049</td>
                </tr>
                <tr valign="top">
                  <td colspan="6">
                    <bold>Male samples (n=882)</bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>mfcc_std_2</td>
                  <td colspan="2">–0.09</td>
                  <td colspan="2">.005</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>mfcc_std_5</td>
                  <td colspan="2">–0.09</td>
                  <td colspan="2">.01</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>mfcc_mean_5</td>
                  <td colspan="2">–0.08</td>
                  <td colspan="2">.01</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>f0_std</td>
                  <td colspan="2">0.07</td>
                  <td colspan="2">.03</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>mfcc_std_4</td>
                  <td colspan="2">–0.07</td>
                  <td colspan="2">.04</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Shimmer</td>
                  <td colspan="2">0.07</td>
                  <td colspan="2">.04</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>mfcc_std_11</td>
                  <td colspan="2">–0.07</td>
                  <td colspan="2">.046</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>f1_mean</td>
                  <td colspan="2">0.07</td>
                  <td colspan="2">.047</td>
                </tr>
              </tbody>
            </table>
          </table-wrap>
          <table-wrap position="float" id="table4">
            <label>Table 4</label>
            <caption>
              <p>Correlation of acoustic features not found to be significant.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="150"/>
              <col width="430"/>
              <col width="70"/>
              <col width="70"/>
              <col width="0"/>
              <col width="70"/>
              <col width="70"/>
              <col width="0"/>
              <col width="70"/>
              <col width="70"/>
              <thead>
                <tr valign="top">
                  <td>Feature</td>
                  <td>Previous works</td>
                  <td colspan="8">This study</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td colspan="3">All samples</td>
                  <td colspan="3">Female samples</td>
                  <td colspan="2">Male samples</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>
                    <break/>
                  </td>
                  <td>
                    <italic>r</italic>
                  </td>
                  <td><italic>P</italic> value</td>
                  <td colspan="2">
                    <italic>r</italic>
                  </td>
                  <td><italic>P</italic> value</td>
                  <td colspan="2">
                    <italic>r</italic>
                  </td>
                  <td><italic>P</italic> value</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Jitter</td>
                  <td>Showed a significant increase from a neutral state to an anxious state [<xref ref-type="bibr" rid="ref13">13</xref>]</td>
                  <td>0.03</td>
                  <td>.18</td>
                  <td colspan="2">–0.01</td>
                  <td>.76</td>
                  <td colspan="2">0.06</td>
                  <td>.06</td>
                </tr>
                <tr valign="top">
                  <td>ZCR-zPSD<sup>a</sup></td>
                  <td>ZCR-zPSD was one of the top selected features using the Davies-Bouldin index–based feature selection [<xref ref-type="bibr" rid="ref8">8</xref>]</td>
                  <td>0.01</td>
                  <td>.67</td>
                  <td colspan="2">–0.04</td>
                  <td>.29</td>
                  <td colspan="2">0.05</td>
                  <td>.14</td>
                </tr>
                <tr valign="top">
                  <td>Articulation rate</td>
                  <td>Patients with panic disorder spoke significantly slower (<italic>P</italic>&#60;.001) during autobiographical talking than during script talking [<xref ref-type="bibr" rid="ref22">22</xref>]</td>
                  <td>–0.01</td>
                  <td>.64</td>
                  <td colspan="2">–0.05</td>
                  <td>.12</td>
                  <td colspan="2">0.02</td>
                  <td>.55</td>
                </tr>
                <tr valign="top">
                  <td>F1<sup>b</sup> SD</td>
                  <td>Showed a significant change between neutral state and anxious state [<xref ref-type="bibr" rid="ref13">13</xref>]</td>
                  <td>–0.03</td>
                  <td>.18</td>
                  <td colspan="2">–0.02</td>
                  <td>.53</td>
                  <td colspan="2">–0.04</td>
                  <td>.25</td>
                </tr>
                <tr valign="top">
                  <td>F2<sup>c</sup> mean</td>
                  <td>Showed a significant change between neutral state and anxious state [<xref ref-type="bibr" rid="ref13">13</xref>]</td>
                  <td>0.004</td>
                  <td>.85</td>
                  <td colspan="2">0.04</td>
                  <td>.26</td>
                  <td colspan="2">–0.04</td>
                  <td>.22</td>
                </tr>
                <tr valign="top">
                  <td>F2 SD</td>
                  <td>Showed a significant change between neutral state and anxious state [<xref ref-type="bibr" rid="ref13">13</xref>]</td>
                  <td>0.01</td>
                  <td>.59</td>
                  <td colspan="2">0.03</td>
                  <td>.38</td>
                  <td colspan="2">–0.02</td>
                  <td>.60</td>
                </tr>
                <tr valign="top">
                  <td>F3<sup>d</sup> mean</td>
                  <td>Showed a significant change between neutral state and anxious state [<xref ref-type="bibr" rid="ref13">13</xref>]</td>
                  <td>0.02</td>
                  <td>.49</td>
                  <td colspan="2">0.04</td>
                  <td>.21</td>
                  <td colspan="2">–0.01</td>
                  <td>.72</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table4fn1">
                <p><sup>a</sup>ZCR-zPSD: zero crossing rate for the <italic>z</italic> score of the power spectral density.</p>
              </fn>
              <fn id="table4fn2">
                <p><sup>b</sup>F1: first formant.</p>
              </fn>
              <fn id="table4fn3">
                <p><sup>c</sup>F2: second formant.</p>
              </fn>
              <fn id="table4fn4">
                <p><sup>d</sup>F3: third formant.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
          <table-wrap position="float" id="table5">
            <label>Table 5</label>
            <caption>
              <p>Comparison of previous works’ correlations with those of this study.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="160"/>
              <col width="180"/>
              <col width="180"/>
              <col width="80"/>
              <col width="80"/>
              <col width="80"/>
              <col width="80"/>
              <col width="80"/>
              <col width="80"/>
              <thead>
                <tr valign="top">
                  <td>Feature</td>
                  <td colspan="2">Previous work</td>
                  <td colspan="6">This study</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td colspan="2">
                    <break/>
                  </td>
                  <td colspan="2">All samples</td>
                  <td colspan="2">Female samples</td>
                  <td colspan="2">Male samples</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>
                    <italic>r</italic>
                  </td>
                  <td><italic>P</italic> value</td>
                  <td>
                    <italic>r</italic>
                  </td>
                  <td><italic>P</italic> value</td>
                  <td>
                    <italic>r</italic>
                  </td>
                  <td><italic>P</italic> value</td>
                  <td>
                    <italic>r</italic>
                  </td>
                  <td><italic>P</italic> value</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td>Speaking duration</td>
                  <td>–0.36</td>
                  <td>&#60;.01</td>
                  <td>–0.12</td>
                  <td>&#60;.001</td>
                  <td>–0.11</td>
                  <td>&#60;.001</td>
                  <td>–0.13</td>
                  <td>&#60;.001</td>
                </tr>
                <tr valign="top">
                  <td>MFCC<sup>a</sup>_std_1</td>
                  <td>–0.36</td>
                  <td>&#60;.05</td>
                  <td>0.01</td>
                  <td>.54</td>
                  <td>0.02</td>
                  <td>.61</td>
                  <td>0.02</td>
                  <td>.52</td>
                </tr>
                <tr valign="top">
                  <td>F0<sup>b</sup>_mean</td>
                  <td>Female: 0.02; male: 0.72</td>
                  <td>Female: 0.92; male: 0.002</td>
                  <td>0.02</td>
                  <td>.37</td>
                  <td>–0.03</td>
                  <td>.33</td>
                  <td>0.06</td>
                  <td>.06</td>
                </tr>
                <tr valign="top">
                  <td>F0_SD</td>
                  <td>–0.24</td>
                  <td>&#60;.05</td>
                  <td>0.06</td>
                  <td>.01</td>
                  <td>0.03</td>
                  <td>.30</td>
                  <td>0.07</td>
                  <td>.03</td>
                </tr>
                <tr valign="top">
                  <td>Intensity mean</td>
                  <td>–0.2</td>
                  <td>—<sup>c</sup></td>
                  <td>–0.04</td>
                  <td>.13</td>
                  <td>–0.09</td>
                  <td>.01</td>
                  <td>0.01</td>
                  <td>.72</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table5fn1">
                <p><sup>a</sup>MFCC: mel-frequency cepstral coefficient.</p>
              </fn>
              <fn id="table5fn2">
                <p><sup>b</sup>F0: fundamental frequency.</p>
              </fn>
              <fn id="table5fn3">
                <p><sup>c</sup>Not available.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
        </sec>
        <sec>
          <title>Linguistic Feature Correlation With the GAD-7</title>
          <p>The quality of the transcript produced using Amazon’s AWS STT program [<xref ref-type="bibr" rid="ref42">42</xref>] was analyzed by comparing the transcript produced from the task 1 audio with the actual <italic>My Grandfather</italic> passage. The word error rate was calculated, and the STT transcript had an average word error rate of 7% (SD 4.6%).</p>
          <p><xref ref-type="table" rid="table6">Table 6</xref> presents the set of linguistic features (described in the <italic>Linguistic Features</italic> section) that had <italic>P</italic> values &#60;.05 for the same 3 data sets: all participants, male-only participants, and female-only participants. Each section in the table is sorted in decreasing order of absolute value of correlation. As described previously, the partial correlations account for age and personal income across all data sets, and we also controlled for sex in the full data set.</p>
          <table-wrap position="float" id="table6">
            <label>Table 6</label>
            <caption>
              <p>Correlation of significant Linguistic Inquiry and Word Count linguistic features with the Generalized Anxiety Disorder 7-item scale.</p>
            </caption>
            <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
              <col width="30"/>
              <col width="470"/>
              <col width="0"/>
              <col width="250"/>
              <col width="0"/>
              <col width="250"/>
              <thead>
                <tr valign="top">
                  <td colspan="3">Sample and feature</td>
                  <td colspan="2">
                    <italic>r</italic>
                  </td>
                  <td><italic>P</italic> value</td>
                </tr>
              </thead>
              <tbody>
                <tr valign="top">
                  <td colspan="6">
                    <bold>All samples (N=1744)</bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>AllPunc</td>
                  <td colspan="2">0.13</td>
                  <td colspan="2">&#60;.001</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Period</td>
                  <td colspan="2">0.12</td>
                  <td colspan="2">&#60;.001</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>assent</td>
                  <td colspan="2">0.10</td>
                  <td colspan="2">&#60;.001</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>negemo</td>
                  <td colspan="2">0.10</td>
                  <td colspan="2">&#60;.001</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>relativ</td>
                  <td colspan="2">–0.09</td>
                  <td colspan="2">&#60;.001</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>motion</td>
                  <td colspan="2">–0.08</td>
                  <td colspan="2">&#60;.001</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>swear</td>
                  <td colspan="2">0.08</td>
                  <td colspan="2">&#60;.001</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>anger</td>
                  <td colspan="2">0.08</td>
                  <td colspan="2">&#60;.001</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>focusfuture</td>
                  <td colspan="2">–0.07</td>
                  <td colspan="2">.003</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>adverb</td>
                  <td colspan="2">–0.07</td>
                  <td colspan="2">.004</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>time</td>
                  <td colspan="2">–0.07</td>
                  <td colspan="2">.004</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>function</td>
                  <td colspan="2">–0.07</td>
                  <td colspan="2">.005</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>negate</td>
                  <td colspan="2">0.07</td>
                  <td colspan="2">.006</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>prep</td>
                  <td colspan="2">–0.06</td>
                  <td colspan="2">.007</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>WPS<sup>a</sup></td>
                  <td colspan="2">–0.06</td>
                  <td colspan="2">.007</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>anx</td>
                  <td colspan="2">0.06</td>
                  <td colspan="2">.008</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>hear</td>
                  <td colspan="2">0.06</td>
                  <td colspan="2">.01</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>death</td>
                  <td colspan="2">0.06</td>
                  <td colspan="2">.01</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>ipron</td>
                  <td colspan="2">–0.06</td>
                  <td colspan="2">.01</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>see</td>
                  <td colspan="2">–0.06</td>
                  <td colspan="2">.01</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>affect</td>
                  <td colspan="2">0.06</td>
                  <td colspan="2">.02</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>i</td>
                  <td colspan="2">0.05</td>
                  <td colspan="2">.02</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>family</td>
                  <td colspan="2">0.05</td>
                  <td colspan="2">.02</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>sad</td>
                  <td colspan="2">0.05</td>
                  <td colspan="2">.03</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>ppron</td>
                  <td colspan="2">0.05</td>
                  <td colspan="2">.03</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>space</td>
                  <td colspan="2">–0.05</td>
                  <td colspan="2">.04</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>article</td>
                  <td colspan="2">–0.05</td>
                  <td colspan="2">.04</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>leisure</td>
                  <td colspan="2">0.05</td>
                  <td colspan="2">.04</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>friend</td>
                  <td colspan="2">0.05</td>
                  <td colspan="2">.047</td>
                </tr>
                <tr valign="top">
                  <td colspan="6">
                    <bold>Female samples (n=862)</bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Period</td>
                  <td colspan="2">0.16</td>
                  <td colspan="2">&#60;.001</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>AllPunc</td>
                  <td colspan="2">0.14</td>
                  <td colspan="2">&#60;.001</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>adverb</td>
                  <td colspan="2">–0.11</td>
                  <td colspan="2">&#60;.001</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>negemo</td>
                  <td colspan="2">0.11</td>
                  <td colspan="2">&#60;.001</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>anger</td>
                  <td colspan="2">0.11</td>
                  <td colspan="2">.002</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>motion</td>
                  <td colspan="2">–0.10</td>
                  <td colspan="2">.003</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>assent</td>
                  <td colspan="2">0.10</td>
                  <td colspan="2">.004</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>see</td>
                  <td colspan="2">–0.09</td>
                  <td colspan="2">.006</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>relativ</td>
                  <td colspan="2">–0.09</td>
                  <td colspan="2">.006</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>sad</td>
                  <td colspan="2">0.08</td>
                  <td colspan="2">.01</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Dic</td>
                  <td colspan="2">–0.08</td>
                  <td colspan="2">.02</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>power</td>
                  <td colspan="2">0.07</td>
                  <td colspan="2">.03</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>WPS</td>
                  <td colspan="2">–0.07</td>
                  <td colspan="2">.03</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>death</td>
                  <td colspan="2">0.07</td>
                  <td colspan="2">.04</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>percept</td>
                  <td colspan="2">–0.07</td>
                  <td colspan="2">.046</td>
                </tr>
                <tr valign="top">
                  <td colspan="6">
                    <bold>Male samples (n=882)</bold>
                  </td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>AllPunc</td>
                  <td colspan="2">0.13</td>
                  <td colspan="2">&#60;.001</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>assent</td>
                  <td colspan="2">0.11</td>
                  <td colspan="2">.001</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>relativ</td>
                  <td colspan="2">–0.10</td>
                  <td colspan="2">.002</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>leisure</td>
                  <td colspan="2">0.10</td>
                  <td colspan="2">.002</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>hear</td>
                  <td colspan="2">0.10</td>
                  <td colspan="2">.003</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>swear</td>
                  <td colspan="2">0.10</td>
                  <td colspan="2">.004</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>time</td>
                  <td colspan="2">–0.10</td>
                  <td colspan="2">.004</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Apostro</td>
                  <td colspan="2">0.09</td>
                  <td colspan="2">.005</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>power</td>
                  <td colspan="2">–0.09</td>
                  <td colspan="2">.01</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>ppron</td>
                  <td colspan="2">0.09</td>
                  <td colspan="2">.01</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Sixltr</td>
                  <td colspan="2">–0.09</td>
                  <td colspan="2">.01</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>anx</td>
                  <td colspan="2">0.08</td>
                  <td colspan="2">.01</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>negate</td>
                  <td colspan="2">0.08</td>
                  <td colspan="2">.01</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>negemo</td>
                  <td colspan="2">0.08</td>
                  <td colspan="2">.01</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>article</td>
                  <td colspan="2">–0.08</td>
                  <td colspan="2">.01</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>Period</td>
                  <td colspan="2">0.08</td>
                  <td colspan="2">.02</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>prep</td>
                  <td colspan="2">–0.08</td>
                  <td colspan="2">.02</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>focusfuture</td>
                  <td colspan="2">–0.08</td>
                  <td colspan="2">.02</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>family</td>
                  <td colspan="2">0.08</td>
                  <td colspan="2">.02</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>ipron</td>
                  <td colspan="2">–0.07</td>
                  <td colspan="2">.04</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>affect</td>
                  <td colspan="2">0.07</td>
                  <td colspan="2">.04</td>
                </tr>
                <tr valign="top">
                  <td>
                    <break/>
                  </td>
                  <td>motion</td>
                  <td colspan="2">–0.07</td>
                  <td colspan="2">.048</td>
                </tr>
              </tbody>
            </table>
            <table-wrap-foot>
              <fn id="table6fn1">
                <p><sup>a</sup>WPS: words per sentence.</p>
              </fn>
            </table-wrap-foot>
          </table-wrap>
        </sec>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <sec>
          <title>Overview</title>
          <p>Our central objective was to test specific acoustic and linguistic features of impromptu speech for their association with anxiety and to do so with a larger number of participants. In this section, we discuss the implications of the findings presented in the previous section, as well as the limitations of the study.</p>
          <p>The results presented in the <italic>Results</italic> section quantified the relationship between features computed from recorded speech and the self-reported GAD-7 score using Pearson correlation coefficients, controlling for age and income. The results show several significant correlations between features extracted from speech and anxiety, which can help to inform future efforts in the automatic monitoring of anxiety. We discuss these in the following sections.</p>
        </sec>
        <sec>
          <title>Recruitment and Data Inclusion</title>
          <p><xref rid="figure1" ref-type="fig">Figure 1</xref>, the study recruitment flow chart, shows that the recruitment yield was 48.7% (2212/4542). Regarding the 51.3% (2330/4542) of participants who dropped out after accepting the study, we can only speculate as to why. Some may have been unwilling to have their words audio recorded or their full video recorded, and although the consent form makes this task clear, it may be that the participants who dropped out only really understood this when they saw their video on the screen.</p>
          <p>We also conducted a missingness analysis on the 5.64% (256/4542) of samples excluded from the study (<xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>). The results show that in the excluded data, the mention of words related to anxiety and those related to home had a significant positive correlation with anxiety and the count of longer words (&#62;6 letters) was negatively correlated with anxiety. We found similar positive and negative correlations of these features in the 38.4% (1744/4542) of the samples included in our analysis. This indicates that excluding the 256 samples did not affect the correlation results.</p>
        </sec>
        <sec>
          <title>Demographics of Participants</title>
          <p>The proportion of participants in the group with anxiety (those above the GAD-7 screening threshold of 10) was 30.96% (540/1744), which is much higher than the general population rate of approximately 10% [<xref ref-type="bibr" rid="ref1">1</xref>]. This result, indicating that English speakers recruited from Prolific have elevated rates of anxiety and depression, is consistent with our prior studies using recruits from Prolific and suggests that this population exhibits a higher incidence of anxiety [<xref ref-type="bibr" rid="ref23">23</xref>,<xref ref-type="bibr" rid="ref49">49</xref>-<xref ref-type="bibr" rid="ref51">51</xref>]. <xref ref-type="table" rid="table1">Table 1</xref> sheds some light on this difference: it shows that a similar high fraction of participants self-reported on their Prolific profile that they have an ongoing mental health condition.</p>
          <p>The demographic data listed in <xref ref-type="table" rid="table1">Table 1</xref> provide several interesting insights into the recruited cohort with respect to the presence or absence of above-threshold GAD-7 scores. First, there was a significantly larger proportion of women in the group with anxiety than men. This is consistent with previous findings suggesting that anxiety is more prevalent in women than in men [<xref ref-type="bibr" rid="ref43">43</xref>]. We feel that this confirms that it is useful to consider separate female-only and male-only data sets to avoid the bias introduced by sex when exploring features that may correlate with the GAD-7. For example, pitch (F0) would typically be higher for women, and as a result, sex effects could easily confound the association between pitch and anxiety.</p>
          <p>The rows in <xref ref-type="table" rid="table1">Table 1</xref> that show the proportions of participants classified as anxious and nonanxious by income suggest that there is a relationship between income and anxiety: the 2 very lowest categories of income show a disproportionately higher amount of anxiety. There is a downward trend in anxiety with income until the very last category, which is ≥£60,000 (US $82,200). It is interesting that above a certain income level, anxiety seems to increase, although this is consistent with prior studies on anxiety and income [<xref ref-type="bibr" rid="ref45">45</xref>].</p>
          <p>Similarly, with respect to age, younger participants were more likely to be in the group with anxiety, which is consistent with previous work [<xref ref-type="bibr" rid="ref44">44</xref>].</p>
        </sec>
        <sec>
          <title>Posttask Self-report Anxiety Measure</title>
          <p>As described in the <italic>Anxiety Measures</italic> section, we used the posttask self-reported anxiety measure as an internal check to see whether task 2 (the modified TSST task) induced more self-reported anxiety than task 1. A paired <italic>t</italic> test conducted on the 2 informal ratings of anxiety of the 2 tasks had a <italic>P</italic> value of &#60;.001, indicating a significant difference and implying that task 2 induced greater anxiety. Recall that most of the prior work discussed in the <italic>Related Work</italic> section also used mood induction tasks.</p>
        </sec>
        <sec>
          <title>Amount of Speech</title>
          <p>The results suggest that features related to the amount of speech that the participants delivered in response to task 2 had one of the highest correlations with their GAD-7 response across all the features explored in this work. In particular, 2 features captured this aspect: <italic>speaking duration</italic> and <italic>word count</italic>, as shown in <xref ref-type="table" rid="table2">Table 2</xref> (their intercorrelation with each other is presented in <xref ref-type="supplementary-material" rid="app6">Multimedia Appendix 6</xref>). In all cases, the negative direction of the correlation suggests that participants who spoke more tended to have lower GAD-7 scores. This result is consistent with previous work, as shown in the first data row of <xref ref-type="table" rid="table5">Table 5</xref>; however, our study gives a much lower Pearson correlation than prior work (<italic>r</italic>=0.12 in this study vs <italic>r</italic>=0.36 in the study by Laukka et al [<xref ref-type="bibr" rid="ref16">16</xref>]). We speculate that the more anxious a person is, the less confidence they would have about their speech; therefore, perhaps, they speak less.</p>
        </sec>
        <sec>
          <title>Acoustic Features</title>
          <p>The main purpose of this work was to explore how acoustic features relate to anxiety. We wanted to determine whether associations found in previous studies still hold with the larger sample size. <xref ref-type="table" rid="table3">Table 3</xref> lists the features that have significant correlations, with <italic>P</italic>&#60;.05, across all 3 data sets. The features with the strongest correlation in this set were <italic>shimmer</italic> on the all-sample data set and the SDs of the second and third MFCCs for the male-sample and female-sample data sets, respectively. We note that there are multiple parameters used in the extraction of MFCC features; therefore, a direct comparison of the specific MFCC features of our study with specific features of previous work is not possible as the prior work does not provide the exact parameters used to compute the MFCCs. The parameters used in this study are provided in the <italic>Acoustic Features</italic> section under the <italic>Methods</italic> section. That being said, in previous research, the fourth MFCC was the most significant among the 13 MFCC features in the study by Özseven et al [<xref ref-type="bibr" rid="ref13">13</xref>] and the SD of the first MFCC in the study by Wörtwein et al [<xref ref-type="bibr" rid="ref20">20</xref>] had a significant correlation (<italic>r</italic>=–0.36; <italic>P</italic>&#60;.05) with an anxiety scale. These results, from both our study and previous work, suggest that signals of anxiety are present in the MFCC features.</p>
          <p>The following features, listed as relevant in prior work, did not show significant correlations with the GAD-7: F2 and F3, jitter, ZCR-zPSD, and the articulation rate. <xref ref-type="table" rid="table4">Table 4</xref> presents prior works’ associations with anxiety regarding these features and the correlation values obtained in our study. It is important to note that in previous research, these features were noted as significant or relevant; however, no correlations with an indicator of anxiety were provided. This makes it difficult to compare directly with the correlations obtained in our study.</p>
        </sec>
        <sec>
          <title>Linguistic Features</title>
          <p>Correlations between linguistic features extracted using the LIWC dictionaries [<xref ref-type="bibr" rid="ref24">24</xref>] and the GAD-7 have been presented in the <italic>Results</italic> section. These had a higher correlation than the acoustic features, as presented in <xref ref-type="table" rid="table6">Table 6</xref>. The top LIWC category with the highest correlation in all the data sets is the count of punctuations. This includes the count of periods, which would indicate the number of separate sentences. The count of periods together with a negative correlation of words per sentence indicates that the use of shorter sentences is positively associated with anxiety.</p>
          <p>Other LIWC categories with high correlation in the all-sample data set were negative emotion (<italic>negemo</italic>; eg, hurt, ugly, and nasty), anger (<italic>anger</italic>; eg, hate, kill, and annoyed), anxiety (<italic>anx</italic>; eg, worried and fearful), and sad (<italic>sad</italic>; eg, crying, grief, and sad). The anger, anxiety, and sad categories were constituent subsets of the negative emotion (<italic>negemo</italic>) category; that is, words counted under one of the anger, anxiety, or sad categories were also counted for the <italic>negemo</italic> category. The high intercorrelation with each other is shown in <xref ref-type="supplementary-material" rid="app6">Multimedia Appendix 6</xref>. The <italic>negemo</italic> count had a higher correlation than these individual subcategories, suggesting that words related to anger, anxiety, and sad captured different dimensions of self-reported anxiety.</p>
          <p>An LIWC category with a significant correlation that is present in the male-sample data set but not in the female-sample data set is the use of apostrophes (<italic>apostro</italic>), indicating that words with contractions (such as <italic>I’ll</italic>) were positively associated with the GAD-7. In addition, only for men, function words, including personal pronouns (<italic>ppron</italic>), had a significant positive correlation with anxiety. We speculate that male individuals with anxiety might use personal pronouns (which include I, me, and mine) to divert their attention from the anxiety-inducing event and focus on themselves. More generally, the increased use of personal pronouns has been shown to occur in individuals with depression [<xref ref-type="bibr" rid="ref52">52</xref>], a highly comorbid mental health illness with GAD (but not only for men).</p>
          <p>Another differentiation between men and women occurs in the LIWC feature for words related to <italic>power</italic> (eg, superior and bully). The <italic>power</italic> count had a positive correlation with the GAD-7 for women and a negative correlation for men. We speculate that the negative correlation is somehow related to the stereotypical dominance behavior associated with men.</p>
          <p>In prior work studying associations between LIWC scores and anxiety, words related to anxiety and first-person singular pronouns were shown to be significantly associated with social anxiety [<xref ref-type="bibr" rid="ref25">25</xref>], similar to our results. The same work has also shown that perceptual process words (see, hear, and feel) are significantly associated with anxiety, which does not align with our results. For example, the LIWC category for <italic>see</italic> has a negative correlation in both the all-sample and the female-sample data sets (as shown in <xref ref-type="table" rid="table6">Table 6</xref>). However, in the study by Di Matteo et al [<xref ref-type="bibr" rid="ref23">23</xref>], the category <italic>see</italic> had a positive correlation (<italic>r</italic>=0.31; <italic>P</italic>=.02) with a social anxiety measure. We speculate that the use of perceptual process words (eg, <italic>see</italic>) might be a differentiating factor between social anxiety and GAD as it was positively correlated in the former and negatively correlated in the latter. By contrast, the LIWC category for the perceptual process word <italic>hear</italic> had a positive correlation in both the all-sample and the male-sample data set (also shown in <xref ref-type="table" rid="table6">Table 6</xref>). Notice that both <italic>see</italic> and <italic>hear</italic> are perceptual processes; however, the category for <italic>see</italic> is significant for women, whereas the category for <italic>hear</italic> is significant for men.</p>
          <p>Furthermore, in prior work, death-related words were shown to have a positive correlation with anxiety [<xref ref-type="bibr" rid="ref23">23</xref>]. Our results (as shown in <xref ref-type="table" rid="table6">Table 6</xref>) show a similar trend where death-related words had a significant positive correlation in the male-sample and all-sample data sets. However, a significant correlation was not observed in the female-sample data set.</p>
          <p>The fact that there are several single-word categories that have significant correlations suggests that techniques that are able to look at multiple word meanings may have greater potential in making predictions.</p>
        </sec>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>A limitation of this study is the use of self-report measures to assess GAD. Self-report measures, by nature, are subjective opinions that individuals have about themselves while filling out the questionnaires and may not completely capture clinical symptoms. In this study, we took these self-report questionnaires as the true label of the audio samples. However, we believe that this is a good first step that gave us encouraging preliminary results. A psychiatric diagnosis would be an improved label but is clearly much more expensive to acquire.</p>
        <p>A further limitation of this study is the selection bias that might be introduced during the recruitment of the participants. As presented in <xref rid="figure1" ref-type="fig">Figure 1</xref>, only 48.7% (2212/4542) of the participants who initially accepted the offer from Prolific to participate finished the study. We were not able to collect the GAD-7 scores of the participants who did not complete the study; therefore, we do not know their levels of anxiety. It is possible that these participants had higher levels of anxiety, which caused them to drop out of the study.</p>
        <p>Another limitation concerns the differences in the recording devices and recording locations of the participants performing each task. Ideally, we would want every sample to be recorded using the same microphone in the same location with the same acoustics. This would reduce the potential bias introduced by different factors such as recording quality or background noise. At the same time, in a real-life scenario where an application to detect anxiety might be deployed, the recording equipment and the location will likely differ for everyone. Hence, this limitation could be unavoidable, and it might even be essential to take these types of differences into consideration.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>We present results from a large-N study examining the relationship between speech and GAD. Our data collection relied on participants using home recording devices, hence capturing variations in acoustic environments, which will need to be factored in when deploying tools for the detection of mental health disorders in the wild. Our goal was to provide a useful benchmark for future research by assessing the extent to which results from previous research are generalizable to our data collection approach and larger data set. We tested the most common acoustic and linguistic features associated with anxiety in previous studies and provided detailed correlation tables broken down by demographics.</p>
        <p>Our findings are decidedly mixed. On the one hand, with our larger data set, we found modest correlations between anxiety and several features of speech, including speaking duration and acoustic features such as MFCCs, LPCCs, shimmer, F0, and F1. However, other features shown to correlate with anxiety elsewhere—including F2 and F3, jitter, and ZCR-zPSD—were not significantly associated with anxiety in our study. Although these null findings do not entirely rule out the potential of more sophisticated learning models for this task, we believe that researchers should be wary of inherent difficulties. Readers should also note that our data collection already sidestepped additional challenges that we expected to influence the detection of anxiety disorders from speech, such as variations in accents, dialects, and spoken language. On the other hand, we found statistically significant correlations for a subset of speech features from previous research. This suggests that there may be a fundamental pathway between anxiety and the production of speech, one that is robust enough to be generalized to the population.</p>
        <p>Future investigations could explore whether features of speech from task 1 (simple reading of a passage) exhibit correlations with the GAD-7 or whether these features could be used as a control for the features of task 2 (the modified TSST task). It may also be informative to separate out different age groups (eg, younger and older) to see whether there is a specific impact of speech features on the GAD-7.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Web application screenshot.</p>
        <media xlink:href="mental_v9i7e36828_app1.pdf" xlink:title="PDF File  (Adobe PDF File), 444 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>My Grandfather passage.</p>
        <media xlink:href="mental_v9i7e36828_app2.pdf" xlink:title="PDF File  (Adobe PDF File), 28 KB"/>
      </supplementary-material>
      <supplementary-material id="app3">
        <label>Multimedia Appendix 3</label>
        <p>Speech encouragement statements.</p>
        <media xlink:href="mental_v9i7e36828_app3.pdf" xlink:title="PDF File  (Adobe PDF File), 34 KB"/>
      </supplementary-material>
      <supplementary-material id="app4">
        <label>Multimedia Appendix 4</label>
        <p>Excluded data analysis.</p>
        <media xlink:href="mental_v9i7e36828_app4.pdf" xlink:title="PDF File  (Adobe PDF File), 43 KB"/>
      </supplementary-material>
      <supplementary-material id="app5">
        <label>Multimedia Appendix 5</label>
        <p>Correlation between demographics and acoustic and linguistic features.</p>
        <media xlink:href="mental_v9i7e36828_app5.pdf" xlink:title="PDF File  (Adobe PDF File), 103 KB"/>
      </supplementary-material>
      <supplementary-material id="app6">
        <label>Multimedia Appendix 6</label>
        <p>Significant feature intercorrelations of the all-sample data set.</p>
        <media xlink:href="mental_v9i7e36828_app6.xlsx" xlink:title="XLSX File  (Microsoft Excel File), 21 KB"/>
      </supplementary-material>
      <supplementary-material id="app7">
        <label>Multimedia Appendix 7</label>
        <p>Significant feature intercorrelations of the female-sample data set.</p>
        <media xlink:href="mental_v9i7e36828_app7.xlsx" xlink:title="XLSX File  (Microsoft Excel File), 17 KB"/>
      </supplementary-material>
      <supplementary-material id="app8">
        <label>Multimedia Appendix 8</label>
        <p>Significant feature intercorrelations of the male-sample data set.</p>
        <media xlink:href="mental_v9i7e36828_app8.xlsx" xlink:title="XLSX File  (Microsoft Excel File), 18 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">F0</term>
          <def>
            <p>fundamental frequency</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">F1</term>
          <def>
            <p>first formant</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">F2</term>
          <def>
            <p>second formant</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">F3</term>
          <def>
            <p>third formant</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">GAD</term>
          <def>
            <p>generalized anxiety disorder</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">GAD-7</term>
          <def>
            <p>Generalized Anxiety Disorder 7-item scale</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">LIWC</term>
          <def>
            <p>Linguistic Inquiry and Word Count</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">LPCC</term>
          <def>
            <p>linear prediction cepstral coefficient</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">MFCC</term>
          <def>
            <p>mel-frequency cepstral coefficient</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">SAD</term>
          <def>
            <p>social anxiety disorder</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">STT</term>
          <def>
            <p>speech-to-text</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb12">TSST</term>
          <def>
            <p>Trier Social Stress Test</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb13">ZCR-zPSD</term>
          <def>
            <p>zero crossing rate for the z score of the power spectral density</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This research was funded by a University of Toronto XSeed Grant, Natural Sciences and Engineering Research Council of Canada Discovery Grant (RGPIN-2019-04395), and Social Sciences and Humanities Research Council Partnership Engage Grant (892-2019-0011).</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>WS is an employee of Winterlight Labs and holds equity within the company, and DDD is a former employee of Winterlight Labs.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="web">
          <article-title>Mental Health - Anxiety Disorders</article-title>
          <source>Public Health Canada</source>
          <year>2009</year>
          <access-date>2022-01-24</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.canada.ca/en/health-canada/services/healthy-living/your-health/diseases/mental-health-anxiety-disorders.html">https://www.canada.ca/en/health-canada/services/healthy-living/your-health/diseases/mental-health-anxiety-disorders.html</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Roberge</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Fournier</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Duhoux</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Nguyen</surname>
              <given-names>CT</given-names>
            </name>
            <name name-style="western">
              <surname>Smolders</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Mental health service use and treatment adequacy for anxiety disorders in Canada</article-title>
          <source>Soc Psychiatry Psychiatr Epidemiol</source>
          <year>2011</year>
          <month>04</month>
          <volume>46</volume>
          <issue>4</issue>
          <fpage>321</fpage>
          <lpage>30</lpage>
          <pub-id pub-id-type="doi">10.1007/s00127-010-0186-2</pub-id>
          <pub-id pub-id-type="medline">20217041</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Koerner</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Dugas</surname>
              <given-names>MJ</given-names>
            </name>
            <name name-style="western">
              <surname>Savard</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Gaudet</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Turcotte</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Marchand</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>The economic burden of anxiety disorders in Canada</article-title>
          <source>Can Psychol</source>
          <year>2004</year>
          <volume>45</volume>
          <issue>3</issue>
          <fpage>191</fpage>
          <lpage>201</lpage>
          <pub-id pub-id-type="doi">10.1037/h0088236</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hidalgo</surname>
              <given-names>RB</given-names>
            </name>
            <name name-style="western">
              <surname>Sheehan</surname>
              <given-names>DV</given-names>
            </name>
          </person-group>
          <article-title>Generalized anxiety disorder</article-title>
          <source>Handb Clin Neurol</source>
          <year>2012</year>
          <volume>106</volume>
          <fpage>343</fpage>
          <lpage>62</lpage>
          <pub-id pub-id-type="doi">10.1016/B978-0-444-52002-9.00019-X</pub-id>
          <pub-id pub-id-type="medline">22608630</pub-id>
          <pub-id pub-id-type="pii">B978-0-444-52002-9.00019-X</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hoehn-Saric</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>McLeod</surname>
              <given-names>DR</given-names>
            </name>
          </person-group>
          <article-title>The peripheral sympathetic nervous system. Its role in normal and pathologic anxiety</article-title>
          <source>Psychiatr Clin North Am</source>
          <year>1988</year>
          <month>06</month>
          <volume>11</volume>
          <issue>2</issue>
          <fpage>375</fpage>
          <lpage>86</lpage>
          <pub-id pub-id-type="medline">3047706</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thompson</surname>
              <given-names>AR</given-names>
            </name>
          </person-group>
          <article-title>Pharmacological agents with effects on voice</article-title>
          <source>Am J Otolaryngol</source>
          <year>1995</year>
          <volume>16</volume>
          <issue>1</issue>
          <fpage>12</fpage>
          <lpage>8</lpage>
          <pub-id pub-id-type="doi">10.1016/0196-0709(95)90003-9</pub-id>
          <pub-id pub-id-type="medline">7717466</pub-id>
          <pub-id pub-id-type="pii">0196-0709(95)90003-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Barrett</surname>
              <given-names>LF</given-names>
            </name>
          </person-group>
          <source>How Emotions Are Made: The Secret Life of the Brain</source>
          <year>2018</year>
          <publisher-loc>Boston, MA, USA</publisher-loc>
          <publisher-name>Mariner Books</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>McGinnis</surname>
              <given-names>EW</given-names>
            </name>
            <name name-style="western">
              <surname>Anderau</surname>
              <given-names>SP</given-names>
            </name>
            <name name-style="western">
              <surname>Hruschak</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Gurchiek</surname>
              <given-names>RD</given-names>
            </name>
            <name name-style="western">
              <surname>Lopez-Duran</surname>
              <given-names>NL</given-names>
            </name>
            <name name-style="western">
              <surname>Fitzgerald</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Rosenblum</surname>
              <given-names>KL</given-names>
            </name>
            <name name-style="western">
              <surname>Muzik</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>McGinnis</surname>
              <given-names>RS</given-names>
            </name>
          </person-group>
          <article-title>Giving voice to vulnerable children: machine learning analysis of speech detects anxiety and depression in early childhood</article-title>
          <source>IEEE J Biomed Health Inform</source>
          <year>2019</year>
          <month>11</month>
          <volume>23</volume>
          <issue>6</issue>
          <fpage>2294</fpage>
          <lpage>301</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/31034426"/>
          </comment>
          <pub-id pub-id-type="doi">10.1109/JBHI.2019.2913590</pub-id>
          <pub-id pub-id-type="medline">31034426</pub-id>
          <pub-id pub-id-type="pmcid">PMC7484854</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Buske-Kirschbaum</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Jobst</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Wustmans</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kirschbaum</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Rauh</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Hellhammer</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Attenuated free cortisol response to psychosocial stress in children with atopic dermatitis</article-title>
          <source>Psychosom Med</source>
          <year>1997</year>
          <volume>59</volume>
          <issue>4</issue>
          <fpage>419</fpage>
          <lpage>26</lpage>
          <pub-id pub-id-type="doi">10.1097/00006842-199707000-00012</pub-id>
          <pub-id pub-id-type="medline">9251162</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ali</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Tanweer</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Khalid</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Rao</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Mel frequency cepstral coefficient: a review</article-title>
          <source>Proceedings of the 2nd International Conference on ICT for Digital, Smart, and Sustainable Development</source>
          <year>2020</year>
          <conf-name>ICIDSSD '20</conf-name>
          <conf-date>February 27-28, 2020</conf-date>
          <conf-loc>New Delhi, India</conf-loc>
          <pub-id pub-id-type="doi">10.4108/eai.27-2-2020.2303173</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Davies</surname>
              <given-names>DL</given-names>
            </name>
            <name name-style="western">
              <surname>Bouldin</surname>
              <given-names>DW</given-names>
            </name>
          </person-group>
          <article-title>A cluster separation measure</article-title>
          <source>IEEE Trans Pattern Anal Mach Intell</source>
          <year>1979</year>
          <month>4</month>
          <volume>PAMI-1</volume>
          <issue>2</issue>
          <fpage>224</fpage>
          <lpage>7</lpage>
          <pub-id pub-id-type="doi">10.1109/tpami.1979.4766909</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cortes</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Vapnik</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Support-vector networks</article-title>
          <source>Mach Learn</source>
          <year>1995</year>
          <month>9</month>
          <volume>20</volume>
          <issue>3</issue>
          <fpage>273</fpage>
          <lpage>97</lpage>
          <pub-id pub-id-type="doi">10.1007/BF00994018</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Özseven</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Dügenci</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Doruk</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kahraman</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Voice traces of anxiety: acoustic parameters affected by anxiety disorder</article-title>
          <source>Arch Acoust</source>
          <year>2018</year>
          <volume>43</volume>
          <issue>4</issue>
          <fpage>625</fpage>
          <lpage>36</lpage>
          <pub-id pub-id-type="doi">10.24425/AOA.2018.125156</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Weeks</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>CY</given-names>
            </name>
            <name name-style="western">
              <surname>Reilly</surname>
              <given-names>AR</given-names>
            </name>
            <name name-style="western">
              <surname>Howell</surname>
              <given-names>AN</given-names>
            </name>
            <name name-style="western">
              <surname>France</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Kowalsky</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Bush</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>"The Sound of Fear": assessing vocal fundamental frequency as a physiological indicator of social anxiety disorder</article-title>
          <source>J Anxiety Disord</source>
          <year>2012</year>
          <month>12</month>
          <volume>26</volume>
          <issue>8</issue>
          <fpage>811</fpage>
          <lpage>22</lpage>
          <pub-id pub-id-type="doi">10.1016/j.janxdis.2012.07.005</pub-id>
          <pub-id pub-id-type="medline">23070030</pub-id>
          <pub-id pub-id-type="pii">S0887-6185(12)00093-X</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Julian</surname>
              <given-names>LJ</given-names>
            </name>
          </person-group>
          <article-title>Measures of anxiety: State-Trait Anxiety Inventory (STAI), Beck Anxiety Inventory (BAI), and Hospital Anxiety and Depression Scale-Anxiety (HADS-A)</article-title>
          <source>Arthritis Care Res (Hoboken)</source>
          <year>2011</year>
          <month>11</month>
          <volume>63 Suppl 11</volume>
          <fpage>S467</fpage>
          <lpage>72</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1002/acr.20561"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/acr.20561</pub-id>
          <pub-id pub-id-type="medline">22588767</pub-id>
          <pub-id pub-id-type="pmcid">PMC3879951</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Laukka</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Linnman</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Åhs</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Pissiota</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Frans</surname>
              <given-names>Ö</given-names>
            </name>
            <name name-style="western">
              <surname>Faria</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Michelgård</surname>
              <given-names>Å</given-names>
            </name>
            <name name-style="western">
              <surname>Appel</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Fredrikson</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Furmark</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>In a nervous voice: acoustic analysis and perception of anxiety in social phobics’ speech</article-title>
          <source>J Nonverbal Behav</source>
          <year>2008</year>
          <month>7</month>
          <day>18</day>
          <volume>32</volume>
          <issue>4</issue>
          <fpage>195</fpage>
          <lpage>214</lpage>
          <pub-id pub-id-type="doi">10.1007/s10919-008-0055-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Spielberger</surname>
              <given-names>CD</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Weiner</surname>
              <given-names>IB</given-names>
            </name>
            <name name-style="western">
              <surname>Craighead</surname>
              <given-names>WE</given-names>
            </name>
          </person-group>
          <article-title>State-trait anxiety inventory</article-title>
          <source>The Corsini Encyclopedia of Psychology</source>
          <year>2010</year>
          <publisher-loc>Hoboken, NJ, USA</publisher-loc>
          <publisher-name>John Wiley &#38; Sons</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Albuquerque</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Valente</surname>
              <given-names>AR</given-names>
            </name>
            <name name-style="western">
              <surname>Teixeira</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Figueiredo</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Sa-Couto</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Oliveira</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Association between acoustic speech features and non-severe levels of anxiety and depression symptoms across lifespan</article-title>
          <source>PLoS One</source>
          <year>2021</year>
          <month>4</month>
          <day>8</day>
          <volume>16</volume>
          <issue>4</issue>
          <fpage>e0248842</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0248842"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0248842</pub-id>
          <pub-id pub-id-type="medline">33831018</pub-id>
          <pub-id pub-id-type="pii">PONE-D-20-20587</pub-id>
          <pub-id pub-id-type="pmcid">PMC8031302</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zigmond</surname>
              <given-names>AS</given-names>
            </name>
            <name name-style="western">
              <surname>Snaith</surname>
              <given-names>RP</given-names>
            </name>
          </person-group>
          <article-title>The hospital anxiety and depression scale</article-title>
          <source>Acta Psychiatr Scand</source>
          <year>1983</year>
          <month>06</month>
          <volume>67</volume>
          <issue>6</issue>
          <fpage>361</fpage>
          <lpage>70</lpage>
          <pub-id pub-id-type="doi">10.1111/j.1600-0447.1983.tb09716.x</pub-id>
          <pub-id pub-id-type="medline">6880820</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wortwein</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Morency</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Scherer</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Automatic assessment and analysis of public speaking anxiety: a virtual audience case study</article-title>
          <source>Proceedings of the 2015 International Conference on Affective Computing and Intelligent Interaction</source>
          <year>2015</year>
          <conf-name>ACII '15</conf-name>
          <conf-date>September 21-24, 2015</conf-date>
          <conf-loc>Xi'an, China</conf-loc>
          <fpage>187</fpage>
          <lpage>93</lpage>
          <pub-id pub-id-type="doi">10.1109/acii.2015.7344570</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gilkinson</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Social fears as reported by students in college speech classes</article-title>
          <source>Speech Monogr</source>
          <year>1942</year>
          <month>01</month>
          <volume>9</volume>
          <issue>1</issue>
          <fpage>141</fpage>
          <lpage>60</lpage>
          <pub-id pub-id-type="doi">10.1080/03637754209390068</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hagenaars</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>van Minnen</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>The effect of fear on paralinguistic aspects of speech in patients with panic disorder with agoraphobia</article-title>
          <source>J Anxiety Disord</source>
          <year>2005</year>
          <volume>19</volume>
          <issue>5</issue>
          <fpage>521</fpage>
          <lpage>37</lpage>
          <pub-id pub-id-type="doi">10.1016/j.janxdis.2004.04.008</pub-id>
          <pub-id pub-id-type="medline">15749571</pub-id>
          <pub-id pub-id-type="pii">S0887618504000453</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Di Matteo</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>W</given-names>
            </name>
            <name name-style="western">
              <surname>Fotinos</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Lokuge</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Sternat</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Katzman</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Rose</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Smartphone-detected ambient speech and self-reported measures of anxiety and depression: exploratory observational study</article-title>
          <source>JMIR Form Res</source>
          <year>2021</year>
          <month>01</month>
          <day>29</day>
          <volume>5</volume>
          <issue>1</issue>
          <fpage>e22723</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://formative.jmir.org/2021/1/e22723/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/22723</pub-id>
          <pub-id pub-id-type="medline">33512325</pub-id>
          <pub-id pub-id-type="pii">v5i1e22723</pub-id>
          <pub-id pub-id-type="pmcid">PMC7880807</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pennebaker</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Boyd</surname>
              <given-names>RL</given-names>
            </name>
            <name name-style="western">
              <surname>Jordan</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Blackburn</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>The development and psychometric properties of LIWC2015</article-title>
          <source>The University of Texas at Austin</source>
          <year>2015</year>
          <access-date>2022-01-24</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://repositories.lib.utexas.edu/bitstream/handle/2152/31333/LIWC2015_LanguageManual.pdf">https://repositories.lib.utexas.edu/bitstream/handle/2152/31333/LIWC2015_LanguageManual.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Anderson</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Goldin</surname>
              <given-names>PR</given-names>
            </name>
            <name name-style="western">
              <surname>Kurita</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Gross</surname>
              <given-names>JJ</given-names>
            </name>
          </person-group>
          <article-title>Self-representation in social anxiety disorder: linguistic analysis of autobiographical narratives</article-title>
          <source>Behav Res Ther</source>
          <year>2008</year>
          <month>10</month>
          <volume>46</volume>
          <issue>10</issue>
          <fpage>1119</fpage>
          <lpage>25</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/18722589"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.brat.2008.07.001</pub-id>
          <pub-id pub-id-type="medline">18722589</pub-id>
          <pub-id pub-id-type="pii">S0005-7967(08)00151-4</pub-id>
          <pub-id pub-id-type="pmcid">PMC2630512</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Palan</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Schitter</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Prolific.ac—a subject pool for online experiments</article-title>
          <source>J Behav Exp Finance</source>
          <year>2018</year>
          <month>03</month>
          <volume>17</volume>
          <fpage>22</fpage>
          <lpage>7</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jbef.2017.12.004</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Spitzer</surname>
              <given-names>RL</given-names>
            </name>
            <name name-style="western">
              <surname>Kroenke</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Williams</surname>
              <given-names>JB</given-names>
            </name>
            <name name-style="western">
              <surname>Löwe</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>A brief measure for assessing generalized anxiety disorder: the GAD-7</article-title>
          <source>Arch Intern Med</source>
          <year>2006</year>
          <month>05</month>
          <day>22</day>
          <volume>166</volume>
          <issue>10</issue>
          <fpage>1092</fpage>
          <lpage>7</lpage>
          <pub-id pub-id-type="doi">10.1001/archinte.166.10.1092</pub-id>
          <pub-id pub-id-type="medline">16717171</pub-id>
          <pub-id pub-id-type="pii">166/10/1092</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Reilly</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Fisher</surname>
              <given-names>JL</given-names>
            </name>
          </person-group>
          <article-title>Sherlock Holmes and the strange case of the missing attribution: a historical note on "The Grandfather Passage"</article-title>
          <source>J Speech Lang Hear Res</source>
          <year>2012</year>
          <month>02</month>
          <volume>55</volume>
          <issue>1</issue>
          <fpage>84</fpage>
          <lpage>8</lpage>
          <pub-id pub-id-type="doi">10.1044/1092-4388(2011/11-0158)</pub-id>
          <pub-id pub-id-type="medline">22354714</pub-id>
          <pub-id pub-id-type="pii">55/1/84</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kirschbaum</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Pirke</surname>
              <given-names>KM</given-names>
            </name>
            <name name-style="western">
              <surname>Hellhammer</surname>
              <given-names>DH</given-names>
            </name>
          </person-group>
          <article-title>The 'Trier Social Stress Test'&#8212;a tool for investigating psychobiological stress responses in a laboratory setting</article-title>
          <source>Neuropsychobiology</source>
          <year>1993</year>
          <volume>28</volume>
          <issue>1-2</issue>
          <fpage>76</fpage>
          <lpage>81</lpage>
          <pub-id pub-id-type="doi">10.1159/000119004</pub-id>
          <pub-id pub-id-type="medline">8255414</pub-id>
          <pub-id pub-id-type="pii">119004</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gerra</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Zaimovic</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Zambelli</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Timpano</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Reali</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Bernasconi</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Brambilla</surname>
              <given-names>F</given-names>
            </name>
          </person-group>
          <article-title>Neuroendocrine responses to psychological stress in adolescents with anxiety disorder</article-title>
          <source>Neuropsychobiology</source>
          <year>2000</year>
          <volume>42</volume>
          <issue>2</issue>
          <fpage>82</fpage>
          <lpage>92</lpage>
          <pub-id pub-id-type="doi">10.1159/000026677</pub-id>
          <pub-id pub-id-type="medline">10940763</pub-id>
          <pub-id pub-id-type="pii">26677</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Jezova</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Makatsori</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Duncko</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Moncek</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Jakubek</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>High trait anxiety in healthy subjects is associated with low neuroendocrine activity during psychosocial stress</article-title>
          <source>Prog Neuropsychopharmacol Biol Psychiatry</source>
          <year>2004</year>
          <month>12</month>
          <volume>28</volume>
          <issue>8</issue>
          <fpage>1331</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.1016/j.pnpbp.2004.08.005</pub-id>
          <pub-id pub-id-type="medline">15588760</pub-id>
          <pub-id pub-id-type="pii">S0278-5846(04)00183-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Endler</surname>
              <given-names>NS</given-names>
            </name>
            <name name-style="western">
              <surname>Kocovski</surname>
              <given-names>NL</given-names>
            </name>
          </person-group>
          <article-title>State and trait anxiety revisited</article-title>
          <source>J Anxiety Disord</source>
          <year>2001</year>
          <volume>15</volume>
          <issue>3</issue>
          <fpage>231</fpage>
          <lpage>45</lpage>
          <pub-id pub-id-type="doi">10.1016/s0887-6185(01)00060-3</pub-id>
          <pub-id pub-id-type="medline">11442141</pub-id>
          <pub-id pub-id-type="pii">S0887-6185(01)00060-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>First</surname>
              <given-names>MB</given-names>
            </name>
          </person-group>
          <source>Structured Clinical Interview for DSM-IV Axis I Disorders SCID-I: Clinician Version</source>
          <year>1997</year>
          <publisher-loc>Washington, DC, USA</publisher-loc>
          <publisher-name>American Psychiatric Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kliper</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Portuguese</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Weinshall</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Prosodic analysis of speech and the underlying mental state</article-title>
          <source>Proceedings of the 5th International Conference on the Pervasive Computing Paradigms for Mental Health</source>
          <year>2015</year>
          <conf-name>MindCare '15</conf-name>
          <conf-date>September 24-25, 2015</conf-date>
          <conf-loc>Milan, Italy</conf-loc>
          <fpage>52</fpage>
          <lpage>62</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-319-32270-4_6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pennebaker</surname>
              <given-names>JW</given-names>
            </name>
            <name name-style="western">
              <surname>Mehl</surname>
              <given-names>MR</given-names>
            </name>
            <name name-style="western">
              <surname>Niederhoffer</surname>
              <given-names>KG</given-names>
            </name>
          </person-group>
          <article-title>Psychological aspects of natural language use: our words, our selves</article-title>
          <source>Annu Rev Psychol</source>
          <year>2003</year>
          <volume>54</volume>
          <fpage>547</fpage>
          <lpage>77</lpage>
          <pub-id pub-id-type="doi">10.1146/annurev.psych.54.101601.145041</pub-id>
          <pub-id pub-id-type="medline">12185209</pub-id>
          <pub-id pub-id-type="pii">101601.145041</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sabahi</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>my-voice-analysis</article-title>
          <source>GitHub</source>
          <year>2021</year>
          <month>09</month>
          <day>01</day>
          <access-date>2022-01-24</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://github.com/Shahabks/my-voice-analysis">https://github.com/Shahabks/my-voice-analysis</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lenain</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Weston</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Shivkumar</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Fristed</surname>
              <given-names>E</given-names>
            </name>
          </person-group>
          <article-title>Surfboard: audio feature extraction for modern machine learning</article-title>
          <source>Proceedings of the 2020 Interspeech</source>
          <year>2020</year>
          <conf-name>IS '20</conf-name>
          <conf-date>October 25-29, 2020</conf-date>
          <conf-loc>Shanghai, China</conf-loc>
          <fpage>2917</fpage>
          <lpage>21</lpage>
          <pub-id pub-id-type="doi">10.21437/interspeech.2020-2879</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>McFee</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Raffel</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Ellis</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>McVicar</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Battenberg</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Nieto</surname>
              <given-names>O</given-names>
            </name>
          </person-group>
          <article-title>librosa: audio and music signal analysis in Python</article-title>
          <source>Proceedings of the 14th Python in Science Conference</source>
          <year>2015</year>
          <conf-name>SciPy '15</conf-name>
          <conf-date>July 6-12, 2015</conf-date>
          <conf-loc>Austin, TX, USA</conf-loc>
          <fpage>18</fpage>
          <lpage>24</lpage>
          <pub-id pub-id-type="doi">10.25080/majora-7b98e3ed-003</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Aalto</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Malinen</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Vainio</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Aronoff</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Formants</article-title>
          <source>Oxford Research Encyclopedia of Linguistics</source>
          <year>2018</year>
          <publisher-loc>Oxford, UK</publisher-loc>
          <publisher-name>Oxford University Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Silber-Varod</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Kreiner</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Lovett</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Levi-Belz</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Amir</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Do social anxiety individuals hesitate more? The prosodic profile of hesitation disfluencies in Social Anxiety Disorder individuals</article-title>
          <source>Proceedings of the 8th Speech Prosody</source>
          <year>2016</year>
          <conf-name>SpeechProsody '16</conf-name>
          <conf-date>May 31-June 3, 2016</conf-date>
          <conf-loc>Boston, MA, USA</conf-loc>
          <fpage>1211</fpage>
          <lpage>5</lpage>
          <pub-id pub-id-type="doi">10.21437/speechprosody.2016-249</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fuller</surname>
              <given-names>BF</given-names>
            </name>
            <name name-style="western">
              <surname>Horii</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Conner</surname>
              <given-names>DA</given-names>
            </name>
          </person-group>
          <article-title>Validity and reliability of nonverbal voice measures as indicators of stressor-provoked anxiety</article-title>
          <source>Res Nurs Health</source>
          <year>1992</year>
          <month>10</month>
          <volume>15</volume>
          <issue>5</issue>
          <fpage>379</fpage>
          <lpage>89</lpage>
          <pub-id pub-id-type="doi">10.1002/nur.4770150507</pub-id>
          <pub-id pub-id-type="medline">1529122</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hashemipour</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ali</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Amazon Web Services (AWS) – an overview of the on-demand cloud computing platform</article-title>
          <source>Proceedings of the 3rd EAI International Conference on Emerging Technologies in Computing</source>
          <year>2020</year>
          <conf-name>iCETiC '20</conf-name>
          <conf-date>August 19-20, 2020</conf-date>
          <conf-loc>London, UK</conf-loc>
          <fpage>40</fpage>
          <lpage>7</lpage>
          <pub-id pub-id-type="doi">10.1007/978-3-030-60036-5_3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cameron</surname>
              <given-names>OG</given-names>
            </name>
            <name name-style="western">
              <surname>Hill</surname>
              <given-names>EM</given-names>
            </name>
          </person-group>
          <article-title>Women and anxiety</article-title>
          <source>Psychiatr Clin North Am</source>
          <year>1989</year>
          <month>03</month>
          <volume>12</volume>
          <issue>1</issue>
          <fpage>175</fpage>
          <lpage>86</lpage>
          <pub-id pub-id-type="doi">10.1016/s0193-953x(18)30459-3</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Krasucki</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Howard</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Mann</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>The relationship between anxiety disorders and age</article-title>
          <source>Int J Geriatr Psychiatry</source>
          <year>1998</year>
          <month>02</month>
          <volume>13</volume>
          <issue>2</issue>
          <fpage>79</fpage>
          <lpage>99</lpage>
          <pub-id pub-id-type="doi">10.1002/(sici)1099-1166(199802)13:2&#60;79::aid-gps739&#62;3.0.co;2-g</pub-id>
          <pub-id pub-id-type="medline">9526178</pub-id>
          <pub-id pub-id-type="pii">10.1002/(SICI)1099-1166(199802)13:2&#60;79::AID-GPS739&#62;3.0.CO;2-G</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Dijkstra-Kersten</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Biesheuvel-Leliefeld</surname>
              <given-names>KE</given-names>
            </name>
            <name name-style="western">
              <surname>van der Wouden</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Penninx</surname>
              <given-names>BW</given-names>
            </name>
            <name name-style="western">
              <surname>van Marwijk</surname>
              <given-names>HW</given-names>
            </name>
          </person-group>
          <article-title>Associations of financial strain and income with depressive and anxiety disorders</article-title>
          <source>J Epidemiol Community Health</source>
          <year>2015</year>
          <month>07</month>
          <volume>69</volume>
          <issue>7</issue>
          <fpage>660</fpage>
          <lpage>5</lpage>
          <pub-id pub-id-type="doi">10.1136/jech-2014-205088</pub-id>
          <pub-id pub-id-type="medline">25636322</pub-id>
          <pub-id pub-id-type="pii">jech-2014-205088</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pellegrini</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Hämäläinen</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>de Mareüil</surname>
              <given-names>PB</given-names>
            </name>
            <name name-style="western">
              <surname>Tjalve</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Trancoso</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Candeias</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Dias</surname>
              <given-names>MS</given-names>
            </name>
            <name name-style="western">
              <surname>Braga</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>A corpus-based study of elderly and young speakers of European Portuguese: acoustic correlates and their impact on speech recognition performance</article-title>
          <source>Proceedings of the 2013 Interspeech</source>
          <year>2013</year>
          <conf-name>InterSpeech '13</conf-name>
          <conf-date>August 25-29, 2013</conf-date>
          <conf-loc>Lyon, France</conf-loc>
          <fpage>853</fpage>
          <lpage>6</lpage>
          <pub-id pub-id-type="doi">10.21437/interspeech.2013-241</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Farrow</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Grolleau</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Mzoughi</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>What in the word! The scope for the effect of word choice on economic behavior</article-title>
          <source>Kyklos</source>
          <year>2018</year>
          <month>10</month>
          <day>10</day>
          <volume>71</volume>
          <issue>4</issue>
          <fpage>557</fpage>
          <lpage>80</lpage>
          <pub-id pub-id-type="doi">10.1111/kykl.12186</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Baba</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Shibata</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Sibuya</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Partial correlation and conditional correlation as measures of conditional independence</article-title>
          <source>Aust N Z J Stat</source>
          <year>2004</year>
          <month>12</month>
          <volume>46</volume>
          <issue>4</issue>
          <fpage>657</fpage>
          <lpage>64</lpage>
          <pub-id pub-id-type="doi">10.1111/j.1467-842x.2004.00360.x</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Di Matteo</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Fotinos</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Lokuge</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Yu</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Sternat</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Katzman</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Rose</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>The relationship between smartphone-recorded environmental audio and symptomatology of anxiety and depression: exploratory study</article-title>
          <source>JMIR Form Res</source>
          <year>2020</year>
          <month>08</month>
          <day>13</day>
          <volume>4</volume>
          <issue>8</issue>
          <fpage>e18751</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://formative.jmir.org/2020/8/e18751/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/18751</pub-id>
          <pub-id pub-id-type="medline">32788153</pub-id>
          <pub-id pub-id-type="pii">v4i8e18751</pub-id>
          <pub-id pub-id-type="pmcid">PMC7453326</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Di Matteo</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Fotinos</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Lokuge</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Mason</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Sternat</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Katzman</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Rose</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Automated screening for social anxiety, generalized anxiety, and depression from objective smartphone-collected data: cross-sectional study</article-title>
          <source>J Med Internet Res</source>
          <year>2021</year>
          <month>08</month>
          <day>13</day>
          <volume>23</volume>
          <issue>8</issue>
          <fpage>e28918</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2021/8/e28918/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/28918</pub-id>
          <pub-id pub-id-type="medline">34397386</pub-id>
          <pub-id pub-id-type="pii">v23i8e28918</pub-id>
          <pub-id pub-id-type="pmcid">PMC8398720</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Di Matteo</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>Inference of anxiety and depression from smartphone-collected data</article-title>
          <source>University of Toronto</source>
          <year>2021</year>
          <access-date>2022-01-24</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://tspace.library.utoronto.ca/bitstream/1807/108894/1/Di_Matteo_Daniel_202111_PhD_thesis.pdf">https://tspace.library.utoronto.ca/bitstream/1807/108894/1/Di_Matteo_Daniel_202111_PhD_thesis.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Eichstaedt</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Smith</surname>
              <given-names>RJ</given-names>
            </name>
            <name name-style="western">
              <surname>Merchant</surname>
              <given-names>RM</given-names>
            </name>
            <name name-style="western">
              <surname>Ungar</surname>
              <given-names>LH</given-names>
            </name>
            <name name-style="western">
              <surname>Crutchley</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Preoţiuc-Pietro</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Asch</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Schwartz</surname>
              <given-names>HA</given-names>
            </name>
          </person-group>
          <article-title>Facebook language predicts depression in medical records</article-title>
          <source>Proc Natl Acad Sci U S A</source>
          <year>2018</year>
          <month>10</month>
          <day>30</day>
          <volume>115</volume>
          <issue>44</issue>
          <fpage>11203</fpage>
          <lpage>8</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.pnas.org/doi/abs/10.1073/pnas.1802331115?url_ver=Z39.88-2003&#38;rfr_id=ori:rid:crossref.org&#38;rfr_dat=cr_pub%3dpubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1073/pnas.1802331115</pub-id>
          <pub-id pub-id-type="medline">30322910</pub-id>
          <pub-id pub-id-type="pii">1802331115</pub-id>
          <pub-id pub-id-type="pmcid">PMC6217418</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
