<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JMH</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Ment Health</journal-id>
      <journal-title>JMIR Mental Health</journal-title>
      <issn pub-type="epub">2368-7959</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v9i8e39807</article-id>
      <article-id pub-id-type="pmid">35969444</article-id>
      <article-id pub-id-type="doi">10.2196/39807</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Using Voice Biomarkers to Classify Suicide Risk in Adult Telehealth Callers: Retrospective Observational Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Torous</surname>
            <given-names>John</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Zhang</surname>
            <given-names>Kai</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Helic</surname>
            <given-names>Denis</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Iyer</surname>
            <given-names>Ravi</given-names>
          </name>
          <degrees>BSc, MSc</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Centre for Mental Health</institution>
            <institution>Swinburne University of Technology</institution>
            <addr-line>34 Wakefield Street</addr-line>
            <addr-line>Hawthorn, 3122</addr-line>
            <country>Australia</country>
            <phone>61 454565575</phone>
            <email>raviiyer@swin.edu.au</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-7699-0846</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Nedeljkovic</surname>
            <given-names>Maja</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-0963-0335</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Meyer</surname>
            <given-names>Denny</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-9902-0858</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Centre for Mental Health</institution>
        <institution>Swinburne University of Technology</institution>
        <addr-line>Hawthorn</addr-line>
        <country>Australia</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Ravi Iyer <email>raviiyer@swin.edu.au</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <month>8</month>
        <year>2022</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>15</day>
        <month>8</month>
        <year>2022</year>
      </pub-date>
      <volume>9</volume>
      <issue>8</issue>
      <elocation-id>e39807</elocation-id>
      <history>
        <date date-type="received">
          <day>24</day>
          <month>5</month>
          <year>2022</year>
        </date>
        <date date-type="rev-request">
          <day>14</day>
          <month>6</month>
          <year>2022</year>
        </date>
        <date date-type="rev-recd">
          <day>17</day>
          <month>6</month>
          <year>2022</year>
        </date>
        <date date-type="accepted">
          <day>22</day>
          <month>7</month>
          <year>2022</year>
        </date>
      </history>
      <copyright-statement>©Ravi Iyer, Maja Nedeljkovic, Denny Meyer. Originally published in JMIR Mental Health (https://mental.jmir.org), 15.08.2022.</copyright-statement>
      <copyright-year>2022</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Mental Health, is properly cited. The complete bibliographic information, a link to the original publication on https://mental.jmir.org/, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://mental.jmir.org/2022/8/e39807" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Artificial intelligence has the potential to innovate current practices used to detect the imminent risk of suicide and to address shortcomings in traditional assessment methods.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>In this paper, we sought to automatically classify short segments (40 milliseconds) of speech according to low versus imminent risk of suicide in a large number (n=281) of telephone calls made to 2 telehealth counselling services in Australia.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>A total of 281 help line telephone call recordings sourced from On The Line, Australia (n=266, 94.7%) and 000 Emergency services, Canberra (n=15, 5.3%) were included in this study. Imminent risk of suicide was coded for when callers affirmed intent, plan, and the availability of means; level of risk was assessed by the responding counsellor and reassessed by a team of clinical researchers using the Columbia Suicide Severity Rating Scale (=5/6). Low risk of suicide was coded for in an absence of intent, plan, and means and via Columbia Suicide Severity Rating Scale ratings (=1/2). Preprocessing involved normalization and pre-emphasis of voice signals, while voice biometrics were extracted using the statistical language R. Candidate predictors were identified using Lasso regression. Each voice biomarker was assessed as a predictor of suicide risk using a generalized additive mixed effects model with splines to account for nonlinearity. Finally, a component-wise gradient boosting model was used to classify each call recording based on precoded suicide risk ratings.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>A total of 77 imminent-risk calls were compared with 204 low-risk calls. Moreover, 36 voice biomarkers were extracted from each speech frame. Caller sex was a significant moderating factor (<italic>β</italic>=–.84, 95% CI –0.85, –0.84; <italic>t</italic>=6.59, <italic>P</italic>&#60;.001). Candidate biomarkers were reduced to 11 primary markers, with distinct models developed for men and women. Using leave-one-out cross-validation, ensuring that the speech frames of no single caller featured in both training and test data sets simultaneously, an area under the precision-recall curve of 0.985 was achieved (95% CI 0.97, 1.0). The gamboost classification model correctly classified 469,332/470,032 (99.85%) speech frames.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>This study demonstrates an objective, efficient, and economical assessment of imminent suicide risk in an ecologically valid setting with potential applications to real-time assessment and response.</p>
        </sec>
        <sec sec-type="trial registration">
          <title>Trial Registration</title>
          <p>Australian New Zealand Clinical Trials Registry ACTRN12622000486729; https://www.anzctr.org.au/ACTRN12622000486729.aspx</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>voice biometrics</kwd>
        <kwd>suicide prevention</kwd>
        <kwd>machine learning</kwd>
        <kwd>telehealth</kwd>
        <kwd>suicide</kwd>
        <kwd>risk prediction</kwd>
        <kwd>prediction model</kwd>
        <kwd>voice biomarker</kwd>
        <kwd>mental health</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Suicide remains the 4th leading cause of death among 15- to 45-year-olds internationally [<xref ref-type="bibr" rid="ref1">1</xref>]. However, traditional risk factor–based assessment has failed to identify suicide risk in a targeted and timely manner [<xref ref-type="bibr" rid="ref2">2</xref>]. There has been historically a poor understanding of which risk factors contribute most to identifying an escalation in suicide risk [<xref ref-type="bibr" rid="ref2">2</xref>]. This has led to calls for alternative approaches to evaluation, coupled with more powerful means of analysis [<xref ref-type="bibr" rid="ref3">3</xref>].</p>
      <p>Suicide risk assessment using voice biomarkers holds significant promise. Several candidate voice biomarkers have been identified that discriminate accurately between low and high risk of suicide, including timing and prosody–based features [<xref ref-type="bibr" rid="ref4">4</xref>]. When combined with high-powered forms of statistical analysis (eg, machine learning), voice biomarkers offer an objective, unobtrusive, and economically feasible approach for this purpose.</p>
      <p>In a promising study by Pestian and colleagues [<xref ref-type="bibr" rid="ref5">5</xref>], an accuracy of 85% was obtained using a range of voice biomarkers that classified 379 calls according to the 3 categories of high risk of suicide, mentally ill without prior history of suicidal ideation, and healthy controls. However, the classification was obtained using support vector machines, a powerful machine learning approach that can analyze nonlinear data, but for which post hoc interpretability is unavailable [<xref ref-type="bibr" rid="ref6">6</xref>]. Thus, with a support vector machine, it is difficult to understand which voice biomarkers are important and which are not. These important considerations will be addressed in our new study.</p>
      <p>Sourirajan and colleagues [<xref ref-type="bibr" rid="ref7">7</xref>] replicated the Pestian and colleagues [<xref ref-type="bibr" rid="ref5">5</xref>] study with 94 US veterans, meeting the criteria for Gulf War syndrome measured at months 0, 1, 2, 3, 6, and 12 after recruitment. A range of voice biomarkers provided only modest discrimination between low and high risk of suicide (area under the receiver operating characteristic curve=0.64) [<xref ref-type="bibr" rid="ref7">7</xref>]. However, male Gulf War veterans, who formed the majority of the sample (80.0%), are at lower risk of suicide than the general population [<xref ref-type="bibr" rid="ref8">8</xref>], and reliance upon item 9 of the Patient Health Questionnaire alone (“Thoughts that you’d be better off dead, or thoughts of hurting yourself in some way?”) is associated with higher rates for false positives when compared with the more comprehensive Columbia Suicide Severity Rating Scale used by Pestian and colleagues [<xref ref-type="bibr" rid="ref5">5</xref>].</p>
      <p>Attempts have been made by a number of international jurisdictions to codify a hierarchy of patient suicide risk and appropriate response. Following the UK guidelines, Victoria, Australia has developed the Statewide Mental Health Triage Scale [<xref ref-type="bibr" rid="ref9">9</xref>]. Seven levels of risk are defined, with “current actions endangering self” afforded the highest level of risk, followed by very high risk of imminent harm, high risk, moderate risk, low risk, referral required, and advice or information provision at the lowest level of risk. The second highest category, very high risk, specifies acute suicidal ideation accompanied by clear plan and means. Neither Pestian and colleagues [<xref ref-type="bibr" rid="ref5">5</xref>] nor Sourirajan [<xref ref-type="bibr" rid="ref7">7</xref>] clearly indicated which level of risk was being targeted in their studies. In this new study, we are targeting low risk and below compared with very high risk of imminent harm.</p>
      <p>Furthermore, the studies by Pestian and colleagues [<xref ref-type="bibr" rid="ref5">5</xref>] and Sourirajan [<xref ref-type="bibr" rid="ref7">7</xref>] lack translation into real-world settings. Based on their studies, high levels of accuracy seem plausible only when participants are recruited from inpatient services [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>], interviewed under lab conditions, and risk of suicide is assumed to remain static over time. To extend the generalizability of these findings, participants need to be recruited from ecologically valid settings and assessed when elevation in suicide risk occurs.</p>
      <p>The help line services we partnered with in this study represent ecologically valid settings. Help line services have played an important role in early detection and response to suicide risk in the community since the early 1950s [<xref ref-type="bibr" rid="ref12">12</xref>]. In recent years, help line services have witnessed a significant increase in the volume of suicide-related presentations resulting from the COVID-19 pandemic [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref13">13</xref>]. Help line services support important avenues of suicide detection and prevention by providing equitability of access, promotion of disclosures and trust, and supplementation of traditional forms of health care [<xref ref-type="bibr" rid="ref14">14</xref>]. However, suicide assessment via telehealth is challenged by the absence of nonverbal cues, by time limitations, and by the reticence of some callers to verbally express suicidal intent [<xref ref-type="bibr" rid="ref15">15</xref>].</p>
      <p>Where there is reasonable suspicion that the caller may have taken actions to endanger themselves, emergency management protocols can be triggered. This typically involves dispatch of police and ambulance to perform a welfare check. However, scarce emergency resources can also be dispatched when the caller is not at imminent risk, thus potentially diverting life-saving services from other emergencies. Alternatively, there is the threat of a serious risk of harm when imminent risk of suicide is not detected and therefore not responded to. These unfortunate high-stake scenarios can result in high-pressure work environments that can adversely affect service providers and the individual affected, making it critically important that assessments of imminent suicide risk are as close to 100% correct as possible.</p>
      <p>Artificial intelligence has the potential to detect risk of suicide in an accurate, efficient, and timely manner. Although there is initial evidence for the efficacy of such an approach [<xref ref-type="bibr" rid="ref4">4</xref>], current evidence lacks application to real-world ecologies and real-time assessment, both of which are essential if these insights are to move beyond the lab. Thus, we aimed to use artificial intelligence approaches to automatically classify in real time a large sample of telephone counselling calls made to Australian suicide-prevention help line services using voice biomarkers. By classifying counselling help line calls to a very high level of accuracy, we aim to demonstrate a viable support to existing help line infrastructure that can be employed in real time.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <p><xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> illustrates the analysis workflow.</p>
      <sec>
        <title>Call Recordings</title>
        <p>A total of 532 telephone call recordings were sourced for this retrospective observational study. Of these, 77 (14.5%) featured imminent risk of suicide, while 204 (38.3%) featured low risk of suicide. Participants were callers of Suicide Call-Back Service (a national help line service coordinated by On The Line, Australia) and 000 Emergency Services, Canberra wishing to discuss themes relevant to suicide risk. On The Line call recordings (n=517, 97.2%) were randomly sampled from July 1, 2019, to June 30, 2021, stratified by organizationally determined suicide risk level and disclosed sex of caller. In the case of 000 calls (n=15, 2.8%), call recordings were randomly sampled over the same time to reflect callers exhibiting imminent risk of suicide, necessitating emergency services’ response. Moderate-risk calls (236/517, 45.6%; Columbia Suicide Severity Rating Scale ratings=3 or 4) were removed from further analysis as they were not relevant to the aims of the study.</p>
      </sec>
      <sec>
        <title>Ethics Approval</title>
        <p>No contact information for callers was possible, and a waiver of consent was granted by the Swinburne University Human Research Ethics Committee (reference number: 2021-4340). This study is reported in accordance with the CONSORT (Consolidated Standards of Reporting Trials) checklist [<xref ref-type="bibr" rid="ref16">16</xref>]. A CONSORT attrition flowchart is provided in <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>. This study was registered with the Australian and New Zealand Clinical Trials Registry (ACTRN12622000486729) [<xref ref-type="bibr" rid="ref17">17</xref>].</p>
      </sec>
      <sec>
        <title>Preprocessing of Calls</title>
        <p>All calls were recorded in monochannel 8-kHz, 32-bit float format. Preprocessing involved transformation to 16-bit pulse-code modulation format, normalization, and pre-emphasis, which attenuated low signals and emphasized higher frequency signals to clarify the degree of audibility. This was important to reduce the effect of background noise. Listwise removal of silent frames (1,283,286/1,752,618, 73.2% speech frames) was performed prior to the following analyses.</p>
      </sec>
      <sec>
        <title>Selection of Low Versus Imminent Risk of Suicide Calls for Analysis</title>
        <p>A multigated approach informed the designation of suicide risk level. Imminent risk of suicide was confirmed via affirmative responses (by the caller) to the following 3 screening questions: “are you having thoughts of suicide?”, “do you have a plan?”, and “are the means available?”, in compliance with triage guidelines [<xref ref-type="bibr" rid="ref11">11</xref>]. The level of risk was then reassessed at the conclusion of each call by the responding counsellor using an organizationally developed framework; 6-point Likert-style scale (0-1=low; 2-3=medium; and 4-6=high). Responding counsellors also made clinical notes (eg, presentation of important content), which were inspected to ensure good correspondence to the assigned levels of suicide risk.</p>
        <p>The level of risk for each call was then reassessed by a team of associate researchers (n=6), blinded to the initial rating. The associate researchers were psychologists either provisionally or fully registered with the Psychology Board of Australia, who had substantial prior experience working with suicidal presentations in telehealth settings. A random sample of calls (n=100) was provided to each researcher for reassessment using the Columbia Suicide Severity Rating Scale, a validated measure of suicide risk when used by clinicians [<xref ref-type="bibr" rid="ref18">18</xref>] and administered via telephone [<xref ref-type="bibr" rid="ref19">19</xref>]. Interrater reliability (kappa) of the Suicide Severity Rating Scale among the team of 6 associate researchers was 0.92 for a random selection of 12 recordings. The researchers were also asked to annotate segments of each recording using appropriate audio software (Audacity, Version 2.4.2; Audacity Team). Annotated segments of each recording were to be free from the counsellors’ voice as much as possible. Each annotated segment of speech was also described using mental status examination language.</p>
      </sec>
      <sec>
        <title>Derivation of Voice Biomarkers for Identifying Imminent-Risk Calls</title>
        <p>Each annotated sound segment was divided into 50% overlapping 40-millisecond Blackman-filter windowed frames [<xref ref-type="bibr" rid="ref20">20</xref>]. The frame size ensured an adequate level of magnification of important characteristics at the center of the frame, while the degree of overlap ensured that the tails of each window did not remove valuable information. The modelling of risk occurred via 36 different voice biomarkers. Voice biomarkers are defined at both the 40-millisecond speech frame and segment levels in the generalized additive mixed effects regression model described below.</p>
      </sec>
      <sec>
        <title>Reduction of Voice Biomarkers Using Penalized Lasso Regression</title>
        <p>Penalized Lasso regression [<xref ref-type="bibr" rid="ref21">21</xref>] was performed in the first instance to reduce the number of possible predictors to only those with a strong relationship with suicide risk. However, this model assumes linear relationships between predictors and response (conveyed via a logit link function) and ignores gender effects and correlations among segments across a single call. Thus, this model was used primarily to reduce the set of predictors that informed subsequent analyses.</p>
      </sec>
      <sec>
        <title>Validation via Mixed Effects Generalized Linear Regression</title>
        <p>A 3-level model best reflected the approach to data collection. This model was used to confirm the significance of the reduced predictor set and test for significant moderation by caller sex, while allowing for the correlation between speech frames within each call. Model variables are summarized in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>.</p>
        <p>Splines were applied to each biomarker to account for nonlinearity [<xref ref-type="bibr" rid="ref22">22</xref>]. Random intercepts at level 3 accounted for differences between individual calls, and a binomial model with logit link was used to identify imminent risk speech frames in terms of the level 1 and 2 voice biomarkers.</p>
        <p>Without a comparable prior study, a power analysis for the final classification algorithm was not feasible. However, Pestian and colleagues [<xref ref-type="bibr" rid="ref5">5</xref>] were able to achieve levels of classification accuracy of 85% with 371 recordings (level 2) and ~15 voice biomarkers (level 1). With a more precise classification model (component-wise gradient boosting) and a mixture of level 1 and level 2 predictors, we anticipated that a smaller sample size would suffice for this new study.</p>
      </sec>
      <sec>
        <title>Classification of Calls Using a Gradient Boosting Classification Model</title>
        <p>Although powerful, support vector machines, as used by Pestian and colleagues [<xref ref-type="bibr" rid="ref5">5</xref>], have notable disadvantages. Computation time is prohibitive when the data set is large (eg, &#62;100,000 observations) and the choice of kernel, which allows the algorithm to choose a path of demarcation between groups while minimizing misclassification error, can be difficult. This is especially true when there is little to guide the choice of kernel, which is the case in analysis problems concerning voice biometrics. Finally, the mathematical complexity of support vector machines reduces the transparency of classification decision-making.</p>
        <p>In comparison to support vector machines, gradient boosting is a computationally simpler approach that addresses many of the aforementioned problems. However, in its base implementation, it assumes linearity among the predictors. This can be remedied with an alternative implementation. Component-wise gradient boosting can analyze nonlinear data by first estimating a generalized additive mixed model with splines added, and then applying each model component (individual predictors and random components) to achieve the best reduction in classification error (eg, see Hofner [<xref ref-type="bibr" rid="ref23">23</xref>] for a detailed overview). It is an approach that also allows for sex-moderated effects for all biomarkers.</p>
        <p>Leave-one-out cross validation was used to test the classification accuracy of the gamboost model and to prevent information leakage occurring if data from one participant was used in both the training and test data sets. Thus, n–1 callers were used to train the model, leaving the nested speech frames of a single caller as the test case, ensuring independence of data between training and test data sets. Classification probabilities were derived for each speech frame (40 milliseconds) within each hold-out caller. Frame level classification probabilities were summarized by the mean classification probability for each hold-out caller.</p>
        <p>The Youden J index was used to derive the ideal cut point that maximized upon both sensitivity and specificity of the classification accuracy across all hold-out callers in relation to binary precoded suicide risk level. A total of 1000 bootstrap samples were estimated, and the mean of these estimated samples was used as the ideal cut point. This approach minimizes sample-specific bias and possible overestimation of diagnostic utility, as discussed in Thiele and Hirschfeld [<xref ref-type="bibr" rid="ref24">24</xref>], and is an approach used by other authors, such as Hentschel [<xref ref-type="bibr" rid="ref25">25</xref>].</p>
        <p>Overfitting is suggested when there is a drop in classification accuracy between training and validation classification accuracy, suggesting that the algorithm has <italic>memorized</italic> the basis for classification and applies these insights poorly to new data. Classification accuracy was determined via accuracy measures including area under the receiver operating characteristic and area under the precision-recall curve. Plain language descriptions of all voice biometrics are contained in <xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <p>Select caller demographics are summarized in <xref ref-type="supplementary-material" rid="app5">Multimedia Appendix 5</xref>. The sample comprised 77/281 (27.4%) callers at imminent risk of suicide and 204/281 (72.6%) at low risk of suicide (n=87, 31% male callers and n=194, 69% female callers). Voice biomarkers were derived and analyzed for each of the 470,032 forty-millisecond speech frames. Median number of annotated segments per recording was 13.0 (SD 14.58), and median length of each segment was 118.50 (SD 120.19) milliseconds.</p>
      <sec>
        <title>Reduction of Voice Biomarkers Using Penalized Lasso Regression</title>
        <p>Penalized Lasso regression was performed to reduce the number of voice biomarkers used to predict imminent suicide. A total of 36 initial voice biomarkers were reduced to 12. The significant predictors are summarized in <xref ref-type="supplementary-material" rid="app6">Multimedia Appendix 6</xref>.</p>
      </sec>
      <sec>
        <title>Validation via Mixed Effects Generalized Linear Regression</title>
        <p>A generalized additive mixed model was employed to validate the 12 predictors chosen by the Lasso regression. The model explained 12.0% (adjusted R<sup>2</sup>) of the variance in risk level at the segment level (N=3070 annotated segments).</p>
        <p><xref ref-type="table" rid="table1">Table 1</xref> summarizes the significance of spline coefficients in the generalized additive mixed model. Sex of caller was a significant moderator. The effective degrees of freedom indicate the degree of nonlinearity for each voice biomarker, with higher effective degrees of freedom indicating a greater degree of nonlinearity, and effective degrees of freedom close to 1 indicating linearity. <xref rid="figure1" ref-type="fig">Figure 1</xref> illustrates the relationship between each voice biomarker and the probability of imminent suicide, separately for male and female callers. For example, the plot of root mean squared amplitude suggests that both male and female callers speak with less signal strength (conceptually analogous to speaking in hushed tones) when at imminent risk of suicide. Conversely, increases in spectral slope were observed in both male and female callers as the level of risk of suicide increased, suggesting an increase in physiological effort when experiencing increasing suicidal stress.</p>
        <p>These results confirmed that 11 (92%) of the 12 voice biomarkers were significant predictors of imminent suicide risk. First formant frequency, which proved nonsignificant for both male and female callers, was not included in the subsequent component-wise gradient boosting model.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Voice biomarker significance in the prediction of suicide risk: generalized additive mixed model<sup>a</sup> (adjusted R<sup>2</sup>=0.12; N=3070).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="250"/>
            <col width="110"/>
            <col width="110"/>
            <col width="170"/>
            <col width="110"/>
            <col width="110"/>
            <col width="110"/>
            <thead>
              <tr valign="bottom">
                <td colspan="2">Voice biomarker significance</td>
                <td>
                  <italic>β</italic>
                </td>
                <td>SE</td>
                <td>95% CI</td>
                <td>EDF<sup>b</sup></td>
                <td><italic>F</italic>-test</td>
                <td><italic>P</italic> value</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="2">
                  <bold>Male</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Root mean squared amplitude (dB)</td>
                <td>–19.02</td>
                <td>6.91</td>
                <td>(–19.26, –18.77)</td>
                <td>4.23</td>
                <td>10.99</td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Dominant frequency (Hz)</td>
                <td>–1.10</td>
                <td>1.10</td>
                <td>(–1.14, –1.07)</td>
                <td>1.00</td>
                <td>1.00</td>
                <td>.32</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Entropy</td>
                <td>–2.21</td>
                <td>0.80</td>
                <td>(–2.24, –2.18)</td>
                <td>1.00</td>
                <td>7.56</td>
                <td>.006</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Formant<sub>1</sub> frequency (Hz)</td>
                <td>–0.42</td>
                <td>0.89</td>
                <td>(–0.45, –0.39)</td>
                <td>1.00</td>
                <td>0.23</td>
                <td>.63</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Formant<sub>1</sub> width (Hz)</td>
                <td>–0.60</td>
                <td>1.28</td>
                <td>(–0.65, –0.56)</td>
                <td>2.46</td>
                <td>3.05</td>
                <td>.06</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Formant<sub>2</sub> frequency (Hz)</td>
                <td>–0.88</td>
                <td>0.84</td>
                <td>(–0.91, –0.85)</td>
                <td>1.00</td>
                <td>1.11</td>
                <td>.29</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Formant<sub>2</sub> width (Hz)</td>
                <td>2.09</td>
                <td>0.86</td>
                <td>(2.06, 2.12)</td>
                <td>3.57</td>
                <td>10.16</td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Formant<sub>3</sub> frequency (Hz)</td>
                <td>–3.61</td>
                <td>0.76</td>
                <td>(–3.64, –3.59)</td>
                <td>1.00</td>
                <td>22.68</td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Loudness</td>
                <td>8.57</td>
                <td>2.79</td>
                <td>(8.47, 8.67)</td>
                <td>1.00</td>
                <td>9.52</td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>50th quartile (Hz)</td>
                <td>–1.14</td>
                <td>1.27</td>
                <td>(–1.19, –1.10)</td>
                <td>1.03</td>
                <td>0.82</td>
                <td>.37</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Roughness</td>
                <td>–1.05</td>
                <td>0.71</td>
                <td>(–1.07, –1.02)</td>
                <td>1.00</td>
                <td>2.21</td>
                <td>.12</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Spectral slope</td>
                <td>7.60</td>
                <td>2.09</td>
                <td>(7.53, 7.67)</td>
                <td>4.10</td>
                <td>4.76</td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td colspan="2">
                  <bold>Female</bold>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
                <td>
                  <break/>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Root mean squared amplitude (dB)</td>
                <td>–3.67</td>
                <td>2.28</td>
                <td>(–3.75, –3.59)</td>
                <td>3.74</td>
                <td>7.11</td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Dominant frequency (Hz)</td>
                <td>–2.41</td>
                <td>0.90</td>
                <td>(–2.44, –2.37)</td>
                <td>1.02</td>
                <td>6.63</td>
                <td>.01</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Entropy</td>
                <td>0.36</td>
                <td>0.49</td>
                <td>(0.34, 0.38)</td>
                <td>1.00</td>
                <td>0.54</td>
                <td>.47</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Formant<sub>1</sub> frequency (Hz)</td>
                <td>–3.09</td>
                <td>1.10</td>
                <td>(–3.13, –3.05)</td>
                <td>4.34</td>
                <td>2.71</td>
                <td>.02</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Formant<sub>1</sub> width (Hz)</td>
                <td>3.28</td>
                <td>1.35</td>
                <td>(3.23, 3.32)</td>
                <td>3.44</td>
                <td>2.00</td>
                <td>.07</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Formant<sub>2</sub> frequency (Hz)</td>
                <td>0.17</td>
                <td>0.76</td>
                <td>(0.14, 0.19)</td>
                <td>2.79</td>
                <td>4.18</td>
                <td>.02</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Formant<sub>2</sub> width (Hz)</td>
                <td>–0.74</td>
                <td>0.58</td>
                <td>(–0.76, –0.72)</td>
                <td>2.90</td>
                <td>5.52</td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Formant<sub>3</sub> frequency (Hz)</td>
                <td>0.32</td>
                <td>0.50</td>
                <td>(0.30, 0.34)</td>
                <td>1.01</td>
                <td>0.39</td>
                <td>.53</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Loudness</td>
                <td>–5.97</td>
                <td>4.55</td>
                <td>(–6.13, –5.81)</td>
                <td>1.91</td>
                <td>8.34</td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>50th quartile (Hz)</td>
                <td>–2.53</td>
                <td>0.74</td>
                <td>(–2.56, –2.50)</td>
                <td>1.03</td>
                <td>10.56</td>
                <td>&#60;.001</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Roughness</td>
                <td>1.62</td>
                <td>0.62</td>
                <td>(1.60, 1.64)</td>
                <td>2.58</td>
                <td>4.65</td>
                <td>.005</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>Spectral slope</td>
                <td>4.99</td>
                <td>1.20</td>
                <td>(4.94, 5.03)</td>
                <td>4.10</td>
                <td>10.53</td>
                <td>&#60;.001</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>Male versus female: <italic>β</italic>=–.84; SE 0.002, 95% CI (–0.85, –0.84), <italic>t</italic>=6.59 (2 tailed); <italic>P</italic>&#60;.001.</p>
            </fn>
            <fn id="table1fn2">
              <p><sup>b</sup>EDF: effective degrees of freedom.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>Plots of generalized additive mixed effects model predictors; nonlinear relationship between voice biomarkers and the risk of suicide with 95% CIs.</p>
          </caption>
          <graphic xlink:href="mental_v9i8e39807_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Classification of Calls Using a Gradient Boosting Classification Model</title>
        <p>Component-wise gradient boosting was used to classify each speech frame in terms of low and imminent risk of suicide. Leave-one-out cross-validation was used to test the classification accuracy of the gamboost model. A Youden J index value of 0.51 optimized both sensitivity and specificity in the classification of imminent risk. We correctly classified 469,332/470,032 (99.85%) speech frames (area under the receiver operating characteristic curve=1.0, 95% CI 1.0-1.0; area under the precision-recall curve=0.989, 95% CI 0.97-1.00).</p>
        <p>While all low–suicide-risk speech frames were correctly classified, 700 (0.53%) of the 132,741 imminent risk frames were misclassified as low. This corresponded with the speech frames of a single caller in the 000 Emergency Services, Canberra sample. Upon closer inspection, this caller presented in an intoxicated manner, having ingested a “large amount of sleeping tablets.” Mental status examination annotations made by the reviewing team of psychologists indicated this caller spoke with slow-to-normal rate of speech and flat-to-neutral affect, and was responsive to all questions asked, a presentation similar to many low–suicide-risk callers.</p>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <p>The development of a timely and accurate form of suicide risk assessment remains a significant challenge, especially if implemented in a real-time capacity as required by suicide-prevention help lines. In this study, we sought to automatically classify short segments of speech obtained from 2 suicide-prevention telehealth services in Australia, according to low and imminent risk of suicide using supervised machine learning approaches. We successfully classified 469,332/470,032 (99.85%) speech frames, with only a small number (700/132,741, 0.53%) of high-risk speech frames misclassified.</p>
      <p>Our study compares favorably with the findings of Pestian and colleagues [<xref ref-type="bibr" rid="ref5">5</xref>], who successfully classified 322 (85.0%) of 379 participant recordings discriminating between low and high risk of suicide, using support vector machines. However, our study differs in a number of important ways from the aforementioned study. Rather than classify suicide risk at the holistic recording level, we instead classified risk at the 40-millisecond frame level. This allowed us to expand upon the size of the data set upon which the classification algorithm could be trained and validated, allowing for a more nuanced assessment of each voice biomarker. This approach also demonstrates that only a short segment of a call is required for suicide risk classification, suggesting that the algorithm can be used for triage purposes based on only a short exchange (eg, an exchange with a triaging chatbot).</p>
      <p>Second, we achieved a greater level of transparency and refinement than was afforded by the support vector machine in the study by Pestian and colleagues [<xref ref-type="bibr" rid="ref5">5</xref>]. <xref rid="figure1" ref-type="fig">Figure 1</xref> illustrates the exact nature of the relationship between the 11 voice biomarkers and the level of risk of suicide for men and women and suggests several discernible nuances in the ways male and female callers might speak when experiencing suicidal stress.</p>
      <p>Finally, we reduced the numbers of possible predictors (via Lasso regression) to ensure that only the most statistically relevant biomarkers were included in later models. Our choice of generalized additive mixed model validated the use of all but one of the voice biomarkers selected by the Lasso regression.</p>
      <p>However, there are limitations in our approach. We did not include callers of minority status, such as members of the LGBTIQ+ (lesbian, gay, bisexual, transgender, intersex, queer, and other people of diverse sexuality, gender, or bodily characteristics) communities and callers of non–English speaking backgrounds. These community members may offer valuable information that can further enhance the diversity of input, practical outcomes, and nuance of our analyses overall.</p>
      <p>We were also not always successful in classifying all calls in the imminent suicide risk category. The similarity between the presentations of the single misclassified imminent risk caller and other low suicide risk callers suggests a possible subsample of callers who may be in the midst of a suicide attempt that is not being recognized by our classification approach. This is of concern given this presentation is most at need of timely emergency support. This suggests a possible role for other forms of classification such as natural language processing of speech-to-text translation, which might reduce similar misclassifications in the future.</p>
      <p>There were also notable strengths in our approach. Our industry partnerships with On The Line, Australia and the Australian Federal Police, Canberra ensured that we could trial this novel technology in ecologically valid settings, where call quality is often degraded and background noise evident. This contrasts with the clinical settings within which the majority of studies have thus far been conducted. Our multigated approach to the assignment of suicide risk to each call ensured the establishment of a credible ground truth that was pivotal in accurately training the classification algorithm. Our choice of advanced statistical modelling has ensured a robust account of error variance in estimating the probability of imminent suicide. A final strength of this study is the visualization of imminent risk of suicide in terms of voice biomarkers allowing for nonlinearity.</p>
      <p>This study has taken evidence from 25 years of pilot research and extended it to real world scenarios involving present-moment intent to suicide. However, it would be beneficial to control for caller age. We did not control for age in any of our analyses; however, given the well-documented age-related changes in vocal quality, this should feature in subsequent analyses and would boost an account of variance achieved by future mixed effects modelling. Finally, these compelling findings suggest possible implementation within a suicide-prevention telehealth service as an avenue worthy of further exploration.</p>
    </sec>
  </body>
  <back>
    <app-group>
      <supplementary-material id="app1">
        <label>Multimedia Appendix 1</label>
        <p>Analysis workflow.</p>
        <media xlink:href="mental_v9i8e39807_app1.png" xlink:title="PNG File , 589 KB"/>
      </supplementary-material>
      <supplementary-material id="app2">
        <label>Multimedia Appendix 2</label>
        <p>CONSORT (Consolidated Standards of Reporting Trials) attrition flowchart.</p>
        <media xlink:href="mental_v9i8e39807_app2.png" xlink:title="PNG File , 341 KB"/>
      </supplementary-material>
      <supplementary-material id="app3">
        <label>Multimedia Appendix 3</label>
        <p>Summary generalized additive mixed effect model predictors.</p>
        <media xlink:href="mental_v9i8e39807_app3.docx" xlink:title="DOCX File , 14 KB"/>
      </supplementary-material>
      <supplementary-material id="app4">
        <label>Multimedia Appendix 4</label>
        <p>Definitions of voice biomarkers terminology.</p>
        <media xlink:href="mental_v9i8e39807_app4.docx" xlink:title="DOCX File , 15 KB"/>
      </supplementary-material>
      <supplementary-material id="app5">
        <label>Multimedia Appendix 5</label>
        <p>Summary caller characteristics.</p>
        <media xlink:href="mental_v9i8e39807_app5.docx" xlink:title="DOCX File , 15 KB"/>
      </supplementary-material>
      <supplementary-material id="app6">
        <label>Multimedia Appendix 6</label>
        <p>Summary of significant level 1 and level 2 Lasso regression predictors.</p>
        <media xlink:href="mental_v9i8e39807_app6.docx" xlink:title="DOCX File , 15 KB"/>
      </supplementary-material>
    </app-group>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">CONSORT</term>
          <def>
            <p>Consolidated Standards of Reporting Trials</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">LGBTIQ+</term>
          <def>
            <p>lesbian, gay, bisexual, transgender, intersex, queer, and other people of diverse sexuality, gender, or bodily characteristics</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <fn-group>
      <fn fn-type="con">
        <p>RI was responsible for conceptualization, data curation, investigation, methodology, and writing—original draft. MN was responsible for supervision and writing—review and editing. DM was responsible for methodology, supervision, and writing—review and editing. RI and DM have accessed and verified the underlying data reported in this manuscript. All authors had full access to the data reported in this manuscript. All authors accept full responsibility for the submission of this manuscript.</p>
      </fn>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="web">
          <article-title>Suicide in the world: global health estimates</article-title>
          <source>World Health Organization</source>
          <year>2019</year>
          <access-date>2020-11-17</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://apps.who.int/iris/bitstream/handle/10665/326948/WHO-MSD-MER-19.3-eng.pdf">https://apps.who.int/iris/bitstream/handle/10665/326948/WHO-MSD-MER-19.3-eng.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Franklin</surname>
              <given-names>JC</given-names>
            </name>
            <name name-style="western">
              <surname>Ribeiro</surname>
              <given-names>JD</given-names>
            </name>
            <name name-style="western">
              <surname>Fox</surname>
              <given-names>KR</given-names>
            </name>
            <name name-style="western">
              <surname>Bentley</surname>
              <given-names>KH</given-names>
            </name>
            <name name-style="western">
              <surname>Kleiman</surname>
              <given-names>EM</given-names>
            </name>
            <name name-style="western">
              <surname>Huang</surname>
              <given-names>X</given-names>
            </name>
            <name name-style="western">
              <surname>Musacchio</surname>
              <given-names>KM</given-names>
            </name>
            <name name-style="western">
              <surname>Jaroszewski</surname>
              <given-names>AC</given-names>
            </name>
            <name name-style="western">
              <surname>Chang</surname>
              <given-names>BP</given-names>
            </name>
            <name name-style="western">
              <surname>Nock</surname>
              <given-names>MK</given-names>
            </name>
          </person-group>
          <article-title>Risk factors for suicidal thoughts and behaviors: A meta-analysis of 50 years of research</article-title>
          <source>Psychol Bull</source>
          <year>2017</year>
          <month>02</month>
          <volume>143</volume>
          <issue>2</issue>
          <fpage>187</fpage>
          <lpage>232</lpage>
          <pub-id pub-id-type="doi">10.1037/bul0000084</pub-id>
          <pub-id pub-id-type="medline">27841450</pub-id>
          <pub-id pub-id-type="pii">2016-54856-001</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Čukić</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>López</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Pavón</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Classification of Depression Through Resting-State Electroencephalogram as a Novel Practice in Psychiatry: Review</article-title>
          <source>J Med Internet Res</source>
          <year>2020</year>
          <month>11</month>
          <day>03</day>
          <volume>22</volume>
          <issue>11</issue>
          <fpage>e19548</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2020/11/e19548/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/19548</pub-id>
          <pub-id pub-id-type="medline">33141088</pub-id>
          <pub-id pub-id-type="pii">v22i11e19548</pub-id>
          <pub-id pub-id-type="pmcid">PMC7671839</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cummins</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Scherer</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Krajewski</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Schnieder</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Epps</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Quatieri</surname>
              <given-names>TF</given-names>
            </name>
          </person-group>
          <article-title>A review of depression and suicide risk assessment using speech analysis</article-title>
          <source>Speech Communication</source>
          <year>2015</year>
          <month>07</month>
          <volume>71</volume>
          <fpage>10</fpage>
          <lpage>49</lpage>
          <pub-id pub-id-type="doi">10.1016/j.specom.2015.03.004</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Pestian</surname>
              <given-names>JP</given-names>
            </name>
            <name name-style="western">
              <surname>Sorter</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Connolly</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Bretonnel Cohen</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>McCullumsmith</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Gee</surname>
              <given-names>JT</given-names>
            </name>
            <name name-style="western">
              <surname>Morency</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Scherer</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Rohlfs</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Research Group</surname>
              <given-names>STM</given-names>
            </name>
          </person-group>
          <article-title>A Machine Learning Approach to Identifying the Thought Markers of Suicidal Subjects: A Prospective Multicenter Trial</article-title>
          <source>Suicide Life Threat Behav</source>
          <year>2017</year>
          <month>02</month>
          <volume>47</volume>
          <issue>1</issue>
          <fpage>112</fpage>
          <lpage>121</lpage>
          <pub-id pub-id-type="doi">10.1111/sltb.12312</pub-id>
          <pub-id pub-id-type="medline">27813129</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Došilović</surname>
              <given-names>FK</given-names>
            </name>
            <name name-style="western">
              <surname>Brčić</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Hlupić</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Explainable artificial intelligence: A survey</article-title>
          <year>2018</year>
          <conf-name>41st International Convention on Information and Communication Technology, Electronics and Microelectronics (MIPRO)</conf-name>
          <conf-date>July 2, 2018</conf-date>
          <conf-loc>Opatija, Croatia</conf-loc>
          <pub-id pub-id-type="doi">10.23919/MIPRO.2018.8400040</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sourirajan</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Belouali</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Dutton</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Reinhard</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Pathak</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>A Machine Learning Approach to Detect Suicidal Ideation in US Veterans Based on Acoustic and Linguistic Features of Speech</article-title>
          <source>arXiv</source>
          <year>2020</year>
          <month>09</month>
          <day>14</day>
          <fpage>Preprint</fpage>
          <pub-id pub-id-type="doi">10.1186/s13040-021-00245-y</pub-id>
          <pub-id pub-id-type="medline">33531048</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Barth</surname>
              <given-names>SK</given-names>
            </name>
            <name name-style="western">
              <surname>Kang</surname>
              <given-names>HK</given-names>
            </name>
            <name name-style="western">
              <surname>Bullman</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>All-Cause Mortality Among US Veterans of the Persian Gulf War: 13-Year Follow-up</article-title>
          <source>Public Health Rep</source>
          <year>2016</year>
          <month>11</month>
          <volume>131</volume>
          <issue>6</issue>
          <fpage>822</fpage>
          <lpage>830</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://europepmc.org/abstract/MED/28123229"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/0033354916676278</pub-id>
          <pub-id pub-id-type="medline">28123229</pub-id>
          <pub-id pub-id-type="pii">10.1177_0033354916676278</pub-id>
          <pub-id pub-id-type="pmcid">PMC5230824</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Fisk</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Livingstone</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Pit</surname>
              <given-names>SW</given-names>
            </name>
          </person-group>
          <article-title>Telehealth in the Context of COVID-19: Changing Perspectives in Australia, the United Kingdom, and the United States</article-title>
          <source>J Med Internet Res</source>
          <year>2020</year>
          <month>06</month>
          <day>09</day>
          <volume>22</volume>
          <issue>6</issue>
          <fpage>e19264</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2020/6/e19264/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/19264</pub-id>
          <pub-id pub-id-type="medline">32463377</pub-id>
          <pub-id pub-id-type="pii">v22i6e19264</pub-id>
          <pub-id pub-id-type="pmcid">PMC7286230</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hashim</surname>
              <given-names>NW</given-names>
            </name>
            <name name-style="western">
              <surname>Wilkes</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Salomon</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Meggs</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>France</surname>
              <given-names>DJ</given-names>
            </name>
          </person-group>
          <article-title>Evaluation of Voice Acoustics as Predictors of Clinical Depression Scores</article-title>
          <source>J Voice</source>
          <year>2017</year>
          <month>03</month>
          <volume>31</volume>
          <issue>2</issue>
          <fpage>256.e1</fpage>
          <lpage>256.e6</lpage>
          <pub-id pub-id-type="doi">10.1016/j.jvoice.2016.06.006</pub-id>
          <pub-id pub-id-type="medline">27473933</pub-id>
          <pub-id pub-id-type="pii">S0892-1997(16)30105-9</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Venek</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Scherer</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Morency</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Rizzo</surname>
              <given-names>AS</given-names>
            </name>
            <name name-style="western">
              <surname>Pestian</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Adolescent Suicidal Risk Assessment in Clinician-Patient Interaction</article-title>
          <source>IEEE Trans Affect Comput</source>
          <year>2017</year>
          <month>04</month>
          <day>01</day>
          <volume>8</volume>
          <issue>2</issue>
          <fpage>204</fpage>
          <lpage>215</lpage>
          <pub-id pub-id-type="doi">10.1109/taffc.2016.2518665</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="web">
          <article-title>Annual report 2020-21</article-title>
            <source>Lifeline</source>
          <year>2021</year>
          <access-date>2022-02-21</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.lifeline.org.au/media/vwop50aj/lifeline-annual-report-2021-v2.pdf">https://www.lifeline.org.au/media/vwop50aj/lifeline-annual-report-2021-v2.pdf</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Brülhart</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Klotzbücher</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Lalive</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Reich</surname>
              <given-names>SK</given-names>
            </name>
          </person-group>
          <article-title>Mental health concerns during the COVID-19 pandemic as revealed by helpline calls</article-title>
          <source>Nature</source>
          <year>2021</year>
          <month>12</month>
          <volume>600</volume>
          <issue>7887</issue>
          <fpage>121</fpage>
          <lpage>126</lpage>
          <pub-id pub-id-type="doi">10.1038/s41586-021-04099-6</pub-id>
          <pub-id pub-id-type="medline">34789873</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41586-021-04099-6</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Darbeda</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Aubin</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Lejoyeux</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Luquiens</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Characteristics of Gamblers Who Use the French National Problem Gambling Helpline and Real-Time Chat Facility: Longitudinal Observational Study</article-title>
          <source>JMIR Form Res</source>
          <year>2020</year>
          <month>05</month>
          <day>20</day>
          <volume>4</volume>
          <issue>5</issue>
          <fpage>e13388</fpage>
          <pub-id pub-id-type="doi">10.2196/13388</pub-id>
          <pub-id pub-id-type="medline">32432554</pub-id>
          <pub-id pub-id-type="pii">v4i5e13388</pub-id>
          <pub-id pub-id-type="pmcid">PMC7270843</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hunt</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Wilson</surname>
              <given-names>CJ</given-names>
            </name>
            <name name-style="western">
              <surname>Woodward</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Caputi</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Wilson</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Intervention among Suicidal Men: Future Directions for Telephone Crisis Support Research</article-title>
          <source>Front Public Health</source>
          <year>2018</year>
          <volume>6</volume>
          <fpage>1</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.3389/fpubh.2018.00001"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fpubh.2018.00001</pub-id>
          <pub-id pub-id-type="medline">29404319</pub-id>
          <pub-id pub-id-type="pmcid">PMC5780337</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Eysenbach</surname>
              <given-names>G</given-names>
            </name>
            <collab>CONSORT-EHEALTH Group</collab>
          </person-group>
          <article-title>CONSORT-EHEALTH: improving and standardizing evaluation reports of Web-based and mobile health interventions</article-title>
          <source>J Med Internet Res</source>
          <year>2011</year>
          <month>12</month>
          <day>31</day>
          <volume>13</volume>
          <issue>4</issue>
          <fpage>e126</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2011/4/e126/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/jmir.1923</pub-id>
          <pub-id pub-id-type="medline">22209829</pub-id>
          <pub-id pub-id-type="pii">v13i4e126</pub-id>
          <pub-id pub-id-type="pmcid">PMC3278112</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Iyer</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Nedeljkovic</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Meyer</surname>
              <given-names>D</given-names>
            </name>
          </person-group>
          <article-title>ANZCTR study protocol</article-title>
          <source>Australian New Zealand Clinical Trials Registry</source>
          <access-date>2022-03-02</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.anzctr.org.au/ACTRN12622000486729.aspx">https://www.anzctr.org.au/ACTRN12622000486729.aspx</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Barzilay</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Yaseen</surname>
              <given-names>ZS</given-names>
            </name>
            <name name-style="western">
              <surname>Hawes</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kopeykina</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Ardalan</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Rosenfield</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Murrough</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Galynker</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Determinants and Predictive Value of Clinician Assessment of Short-Term Suicide Risk</article-title>
          <source>Suicide Life Threat Behav</source>
          <year>2019</year>
          <month>04</month>
          <day>17</day>
          <volume>49</volume>
          <issue>2</issue>
          <fpage>614</fpage>
          <lpage>626</lpage>
          <pub-id pub-id-type="doi">10.1111/sltb.12462</pub-id>
          <pub-id pub-id-type="medline">29665120</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Katz</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Barry</surname>
              <given-names>CN</given-names>
            </name>
            <name name-style="western">
              <surname>Cooper</surname>
              <given-names>SA</given-names>
            </name>
            <name name-style="western">
              <surname>Kasprow</surname>
              <given-names>WJ</given-names>
            </name>
            <name name-style="western">
              <surname>Hoff</surname>
              <given-names>RA</given-names>
            </name>
          </person-group>
          <article-title>Use of the Columbia-Suicide Severity Rating Scale (C-SSRS) in a large sample of Veterans receiving mental health services in the Veterans Health Administration</article-title>
          <source>Suicide Life Threat Behav</source>
          <year>2020</year>
          <month>02</month>
          <volume>50</volume>
          <issue>1</issue>
          <fpage>111</fpage>
          <lpage>121</lpage>
          <pub-id pub-id-type="doi">10.1111/sltb.12584</pub-id>
          <pub-id pub-id-type="medline">31441952</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Qu</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Jin</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Jing</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Gao</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Liang</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>An Intelligent Mobile-Enabled System for Diagnosing Parkinson Disease: Development and Validation of a Speech Impairment Detection System</article-title>
          <source>JMIR Med Inform</source>
          <year>2020</year>
          <month>09</month>
          <day>16</day>
          <volume>8</volume>
          <issue>9</issue>
          <fpage>e18689</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://medinform.jmir.org/2020/9/e18689/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/18689</pub-id>
          <pub-id pub-id-type="medline">32936086</pub-id>
          <pub-id pub-id-type="pii">v8i9e18689</pub-id>
          <pub-id pub-id-type="pmcid">PMC7527911</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Desboulets</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>A Review on Variable Selection in Regression Analysis</article-title>
          <source>Econometrics</source>
          <year>2018</year>
          <month>11</month>
          <day>23</day>
          <volume>6</volume>
          <issue>4</issue>
          <fpage>45</fpage>
          <pub-id pub-id-type="doi">10.3390/econometrics6040045</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Coretta</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Modelling electroglottographic data with wavegrams and generalised additive mixed models</article-title>
          <source>OSF Preprints</source>
          <year>2019</year>
          <month>08</month>
          <day>29</day>
          <fpage>Preprint</fpage>
          <pub-id pub-id-type="doi">10.31219/osf.io/m623d</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hofner</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Mayr</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Robinzonov</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Schmid</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Model-based boosting in R: a hands-on tutorial using the R package mboost</article-title>
          <source>Comput Stat</source>
          <year>2012</year>
          <month>12</month>
          <day>22</day>
          <volume>29</volume>
          <issue>1-2</issue>
          <fpage>3</fpage>
          <lpage>35</lpage>
          <pub-id pub-id-type="doi">10.1007/s00180-012-0382-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Thiele</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Hirschfeld</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>cutpointr: improved estimation and validation of optimal cutpoints in R</article-title>
          <source>J Stat Softw</source>
          <year>2021</year>
          <volume>98</volume>
          <issue>11</issue>
          <fpage>1</fpage>
          <lpage>27</lpage>
          <pub-id pub-id-type="doi">10.18637/jss.v098.i11</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hentschel</surname>
              <given-names>AE</given-names>
            </name>
            <name name-style="western">
              <surname>Beijert</surname>
              <given-names>IJ</given-names>
            </name>
            <name name-style="western">
              <surname>Bosschieter</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Kauer</surname>
              <given-names>PC</given-names>
            </name>
            <name name-style="western">
              <surname>Vis</surname>
              <given-names>AN</given-names>
            </name>
            <name name-style="western">
              <surname>Lissenberg-Witte</surname>
              <given-names>BI</given-names>
            </name>
            <name name-style="western">
              <surname>van Moorselaar</surname>
              <given-names>RJA</given-names>
            </name>
            <name name-style="western">
              <surname>Steenbergen</surname>
              <given-names>RDM</given-names>
            </name>
            <name name-style="western">
              <surname>Nieuwenhuijzen</surname>
              <given-names>JA</given-names>
            </name>
          </person-group>
          <article-title>Bladder cancer detection in urine using DNA methylation markers: a technical and prospective preclinical validation</article-title>
          <source>Clin Epigenetics</source>
          <year>2022</year>
          <month>02</month>
          <day>05</day>
          <volume>14</volume>
          <issue>1</issue>
          <fpage>19</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://clinicalepigeneticsjournal.biomedcentral.com/articles/10.1186/s13148-022-01240-8"/>
          </comment>
          <pub-id pub-id-type="doi">10.1186/s13148-022-01240-8</pub-id>
          <pub-id pub-id-type="medline">35123558</pub-id>
          <pub-id pub-id-type="pii">10.1186/s13148-022-01240-8</pub-id>
          <pub-id pub-id-type="pmcid">PMC8818199</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
