@inproceedings{pirhadi-etal-2022-using,
title = "Using Two Losses and Two Datasets Simultaneously to Improve {T}empo{W}i{C} Accuracy",
author = "Pirhadi, Mohammad Javad and
Mirzaei, Motahhare and
Eetemadi, Sauleh",
editor = "Barbieri, Francesco and
Camacho-Collados, Jose and
Dhingra, Bhuwan and
Espinosa-Anke, Luis and
Gribovskaya, Elena and
Lazaridou, Angeliki and
Loureiro, Daniel and
Neves, Leonardo",
booktitle = "Proceedings of the First Workshop on Ever Evolving NLP (EvoNLP)",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.evonlp-1.3",
doi = "10.18653/v1/2022.evonlp-1.3",
pages = "12--15",
abstract = "WSD (Word Sense Disambiguation) is the task of identifying which sense of a word is meant in a sentence or other segment of text. Researchers have worked on this task (e.g. Pustejovsky, 2002) for years but it{'}s still a challenging one even for SOTA (state-of-the-art) LMs (language models). The new dataset, TempoWiC, introduced by Loureiro et al. (2022b), focuses on the fact that words change over time. Their best baseline achieves 70.33{\%} macro-F1. In this work, we use two different losses simultaneously. We also improve our model by using another similar dataset to generalize better. Our best configuration beats their best baseline by 4.23{\%}.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="pirhadi-etal-2022-using">
<titleInfo>
<title>Using Two Losses and Two Datasets Simultaneously to Improve TempoWiC Accuracy</title>
</titleInfo>
<name type="personal">
<namePart type="given">Mohammad</namePart>
<namePart type="given">Javad</namePart>
<namePart type="family">Pirhadi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Motahhare</namePart>
<namePart type="family">Mirzaei</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Sauleh</namePart>
<namePart type="family">Eetemadi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the First Workshop on Ever Evolving NLP (EvoNLP)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Francesco</namePart>
<namePart type="family">Barbieri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jose</namePart>
<namePart type="family">Camacho-Collados</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bhuwan</namePart>
<namePart type="family">Dhingra</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Luis</namePart>
<namePart type="family">Espinosa-Anke</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Elena</namePart>
<namePart type="family">Gribovskaya</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Angeliki</namePart>
<namePart type="family">Lazaridou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Daniel</namePart>
<namePart type="family">Loureiro</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Leonardo</namePart>
<namePart type="family">Neves</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Abu Dhabi, United Arab Emirates (Hybrid)</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>WSD (Word Sense Disambiguation) is the task of identifying which sense of a word is meant in a sentence or other segment of text. Researchers have worked on this task (e.g. Pustejovsky, 2002) for years but it’s still a challenging one even for SOTA (state-of-the-art) LMs (language models). The new dataset, TempoWiC, introduced by Loureiro et al. (2022b), focuses on the fact that words change over time. Their best baseline achieves 70.33% macro-F1. In this work, we use two different losses simultaneously. We also improve our model by using another similar dataset to generalize better. Our best configuration beats their best baseline by 4.23%.</abstract>
<identifier type="citekey">pirhadi-etal-2022-using</identifier>
<identifier type="doi">10.18653/v1/2022.evonlp-1.3</identifier>
<location>
<url>https://aclanthology.org/2022.evonlp-1.3</url>
</location>
<part>
<date>2022-12</date>
<extent unit="page">
<start>12</start>
<end>15</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Using Two Losses and Two Datasets Simultaneously to Improve TempoWiC Accuracy
%A Pirhadi, Mohammad Javad
%A Mirzaei, Motahhare
%A Eetemadi, Sauleh
%Y Barbieri, Francesco
%Y Camacho-Collados, Jose
%Y Dhingra, Bhuwan
%Y Espinosa-Anke, Luis
%Y Gribovskaya, Elena
%Y Lazaridou, Angeliki
%Y Loureiro, Daniel
%Y Neves, Leonardo
%S Proceedings of the First Workshop on Ever Evolving NLP (EvoNLP)
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates (Hybrid)
%F pirhadi-etal-2022-using
%X WSD (Word Sense Disambiguation) is the task of identifying which sense of a word is meant in a sentence or other segment of text. Researchers have worked on this task (e.g. Pustejovsky, 2002) for years but it’s still a challenging one even for SOTA (state-of-the-art) LMs (language models). The new dataset, TempoWiC, introduced by Loureiro et al. (2022b), focuses on the fact that words change over time. Their best baseline achieves 70.33% macro-F1. In this work, we use two different losses simultaneously. We also improve our model by using another similar dataset to generalize better. Our best configuration beats their best baseline by 4.23%.
%R 10.18653/v1/2022.evonlp-1.3
%U https://aclanthology.org/2022.evonlp-1.3
%U https://doi.org/10.18653/v1/2022.evonlp-1.3
%P 12-15
Markdown (Informal)
[Using Two Losses and Two Datasets Simultaneously to Improve TempoWiC Accuracy](https://aclanthology.org/2022.evonlp-1.3) (Pirhadi et al., EvoNLP 2022)
ACL