@inproceedings{chandra-etal-2021-beyond,
title = "Beyond Laurel/Yanny: An Autoencoder-Enabled Search for Polyperceivable Audio",
author = "Chandra, Kartik and
Kabaghe, Chuma and
Valiant, Gregory",
editor = "Zong, Chengqing and
Xia, Fei and
Li, Wenjie and
Navigli, Roberto",
booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 2: Short Papers)",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.acl-short.75/",
doi = "10.18653/v1/2021.acl-short.75",
pages = "593--598",
abstract = "The famous {\textquotedblleft}laurel/yanny{\textquotedblright} phenomenon references an audio clip that elicits dramatically different responses from different listeners. For the original clip, roughly half the population hears the word {\textquotedblleft}laurel,{\textquotedblright} while the other half hears {\textquotedblleft}yanny.{\textquotedblright} How common are such {\textquotedblleft}polyperceivable{\textquotedblright} audio clips? In this paper we apply ML techniques to study the prevalence of polyperceivability in spoken language. We devise a metric that correlates with polyperceivability of audio clips, use it to efficiently find new {\textquotedblleft}laurel/yanny{\textquotedblright}-type examples, and validate these results with human experiments. Our results suggest that polyperceivable examples are surprisingly prevalent in natural language, existing for {\ensuremath{>}}2{\%} of English words."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="chandra-etal-2021-beyond">
<titleInfo>
<title>Beyond Laurel/Yanny: An Autoencoder-Enabled Search for Polyperceivable Audio</title>
</titleInfo>
<name type="personal">
<namePart type="given">Kartik</namePart>
<namePart type="family">Chandra</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chuma</namePart>
<namePart type="family">Kabaghe</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Gregory</namePart>
<namePart type="family">Valiant</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 2: Short Papers)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Chengqing</namePart>
<namePart type="family">Zong</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Fei</namePart>
<namePart type="family">Xia</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wenjie</namePart>
<namePart type="family">Li</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Roberto</namePart>
<namePart type="family">Navigli</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The famous “laurel/yanny” phenomenon references an audio clip that elicits dramatically different responses from different listeners. For the original clip, roughly half the population hears the word “laurel,” while the other half hears “yanny.” How common are such “polyperceivable” audio clips? In this paper we apply ML techniques to study the prevalence of polyperceivability in spoken language. We devise a metric that correlates with polyperceivability of audio clips, use it to efficiently find new “laurel/yanny”-type examples, and validate these results with human experiments. Our results suggest that polyperceivable examples are surprisingly prevalent in natural language, existing for >2% of English words.</abstract>
<identifier type="citekey">chandra-etal-2021-beyond</identifier>
<identifier type="doi">10.18653/v1/2021.acl-short.75</identifier>
<location>
<url>https://aclanthology.org/2021.acl-short.75/</url>
</location>
<part>
<date>2021-08</date>
<extent unit="page">
<start>593</start>
<end>598</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Beyond Laurel/Yanny: An Autoencoder-Enabled Search for Polyperceivable Audio
%A Chandra, Kartik
%A Kabaghe, Chuma
%A Valiant, Gregory
%Y Zong, Chengqing
%Y Xia, Fei
%Y Li, Wenjie
%Y Navigli, Roberto
%S Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 2: Short Papers)
%D 2021
%8 August
%I Association for Computational Linguistics
%C Online
%F chandra-etal-2021-beyond
%X The famous “laurel/yanny” phenomenon references an audio clip that elicits dramatically different responses from different listeners. For the original clip, roughly half the population hears the word “laurel,” while the other half hears “yanny.” How common are such “polyperceivable” audio clips? In this paper we apply ML techniques to study the prevalence of polyperceivability in spoken language. We devise a metric that correlates with polyperceivability of audio clips, use it to efficiently find new “laurel/yanny”-type examples, and validate these results with human experiments. Our results suggest that polyperceivable examples are surprisingly prevalent in natural language, existing for >2% of English words.
%R 10.18653/v1/2021.acl-short.75
%U https://aclanthology.org/2021.acl-short.75/
%U https://doi.org/10.18653/v1/2021.acl-short.75
%P 593-598
Markdown (Informal)
[Beyond Laurel/Yanny: An Autoencoder-Enabled Search for Polyperceivable Audio](https://aclanthology.org/2021.acl-short.75/) (Chandra et al., ACL-IJCNLP 2021)
ACL
- Kartik Chandra, Chuma Kabaghe, and Gregory Valiant. 2021. Beyond Laurel/Yanny: An Autoencoder-Enabled Search for Polyperceivable Audio. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 2: Short Papers), pages 593–598, Online. Association for Computational Linguistics.