@inproceedings{zhuang-zhang-2022-yet,
title = "Yet@{SMM}4{H}{'}22: Improved {BERT}-based classification models with Rdrop and {P}oly{L}oss",
author = "Zhuang, Yan and
Zhang, Yanru",
editor = "Gonzalez-Hernandez, Graciela and
Weissenbacher, Davy",
booktitle = "Proceedings of The Seventh Workshop on Social Media Mining for Health Applications, Workshop {\&} Shared Task",
month = oct,
year = "2022",
address = "Gyeongju, Republic of Korea",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.smm4h-1.28",
pages = "98--102",
abstract = "This paper describes our approach for 11 classification tasks (Task1a, Task2a, Task2b, Task3a, Task3b, Task4, Task5, Task6, Task7, Task8 and Task9) from Social Media Mining for Health (SMM4H) 2022 Shared Tasks. We developed a classification model that incorporated Rdrop to augment data and avoid overfitting, Poly Loss and Focal Loss to alleviate sample imbalance, and pseudo labels to improve model performance. The results of our submissions are over or equal to the median scores in almost all tasks. In addition, our model achieved the highest score in Task4, with a higher 7.8{\%} and 5.3{\%} F1-score than the median scores in Task2b and Task3a respectively.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="zhuang-zhang-2022-yet">
<titleInfo>
<title>Yet@SMM4H’22: Improved BERT-based classification models with Rdrop and PolyLoss</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yan</namePart>
<namePart type="family">Zhuang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yanru</namePart>
<namePart type="family">Zhang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022-10</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of The Seventh Workshop on Social Media Mining for Health Applications, Workshop &amp; Shared Task</title>
</titleInfo>
<name type="personal">
<namePart type="given">Graciela</namePart>
<namePart type="family">Gonzalez-Hernandez</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Davy</namePart>
<namePart type="family">Weissenbacher</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Gyeongju, Republic of Korea</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>This paper describes our approach for 11 classification tasks (Task1a, Task2a, Task2b, Task3a, Task3b, Task4, Task5, Task6, Task7, Task8 and Task9) from Social Media Mining for Health (SMM4H) 2022 Shared Tasks. We developed a classification model that incorporated Rdrop to augment data and avoid overfitting, Poly Loss and Focal Loss to alleviate sample imbalance, and pseudo labels to improve model performance. The results of our submissions are over or equal to the median scores in almost all tasks. In addition, our model achieved the highest score in Task4, with a higher 7.8% and 5.3% F1-score than the median scores in Task2b and Task3a respectively.</abstract>
<identifier type="citekey">zhuang-zhang-2022-yet</identifier>
<location>
<url>https://aclanthology.org/2022.smm4h-1.28</url>
</location>
<part>
<date>2022-10</date>
<extent unit="page">
<start>98</start>
<end>102</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Yet@SMM4H’22: Improved BERT-based classification models with Rdrop and PolyLoss
%A Zhuang, Yan
%A Zhang, Yanru
%Y Gonzalez-Hernandez, Graciela
%Y Weissenbacher, Davy
%S Proceedings of The Seventh Workshop on Social Media Mining for Health Applications, Workshop & Shared Task
%D 2022
%8 October
%I Association for Computational Linguistics
%C Gyeongju, Republic of Korea
%F zhuang-zhang-2022-yet
%X This paper describes our approach for 11 classification tasks (Task1a, Task2a, Task2b, Task3a, Task3b, Task4, Task5, Task6, Task7, Task8 and Task9) from Social Media Mining for Health (SMM4H) 2022 Shared Tasks. We developed a classification model that incorporated Rdrop to augment data and avoid overfitting, Poly Loss and Focal Loss to alleviate sample imbalance, and pseudo labels to improve model performance. The results of our submissions are over or equal to the median scores in almost all tasks. In addition, our model achieved the highest score in Task4, with a higher 7.8% and 5.3% F1-score than the median scores in Task2b and Task3a respectively.
%U https://aclanthology.org/2022.smm4h-1.28
%P 98-102
Markdown (Informal)
[Yet@SMM4H’22: Improved BERT-based classification models with Rdrop and PolyLoss](https://aclanthology.org/2022.smm4h-1.28) (Zhuang & Zhang, SMM4H 2022)
ACL