@article{chowdhury-chaturvedi-2022-learning,
  title     = {Learning Fair Representations via Rate-Distortion Maximization},
  author    = {Chowdhury, Somnath Basu Roy and
               Chaturvedi, Snigdha},
  editor    = {Roark, Brian and
               Nenkova, Ani},
  journal   = {Transactions of the Association for Computational Linguistics},
  volume    = {10},
  year      = {2022},
  address   = {Cambridge, MA},
  publisher = {MIT Press},
  url       = {https://aclanthology.org/2022.tacl-1.67},
  doi       = {10.1162/tacl_a_00512},
  pages     = {1159--1174},
  abstract  = {Text representations learned by machine learning models often encode undesirable demographic information of the user. Predictive models based on these representations can rely on such information, resulting in biased decisions. We present a novel debiasing technique, Fairness-aware Rate Maximization (FaRM), that removes protected information by making representations of instances belonging to the same protected attribute class uncorrelated, using the rate-distortion function. FaRM is able to debias representations with or without a target task at hand. FaRM can also be adapted to remove information about multiple protected attributes simultaneously. Empirical evaluations show that FaRM achieves state-of-the-art performance on several datasets, and learned representations leak significantly less protected attribute information against an attack by a non-linear probing network.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="chowdhury-chaturvedi-2022-learning">
<titleInfo>
<title>Learning Fair Representations via Rate-Distortion Maximization</title>
</titleInfo>
<name type="personal">
<namePart type="given">Somnath</namePart>
<namePart type="given">Basu</namePart>
<namePart type="given">Roy</namePart>
<namePart type="family">Chowdhury</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Snigdha</namePart>
<namePart type="family">Chaturvedi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2022</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<genre authority="bibutilsgt">journal article</genre>
<relatedItem type="host">
<titleInfo>
<title>Transactions of the Association for Computational Linguistics</title>
</titleInfo>
<originInfo>
<issuance>continuing</issuance>
<publisher>MIT Press</publisher>
<place>
<placeTerm type="text">Cambridge, MA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">periodical</genre>
<genre authority="bibutilsgt">academic journal</genre>
</relatedItem>
<abstract>Text representations learned by machine learning models often encode undesirable demographic information of the user. Predictive models based on these representations can rely on such information, resulting in biased decisions. We present a novel debiasing technique, Fairness-aware Rate Maximization (FaRM), that removes protected information by making representations of instances belonging to the same protected attribute class uncorrelated, using the rate-distortion function. FaRM is able to debias representations with or without a target task at hand. FaRM can also be adapted to remove information about multiple protected attributes simultaneously. Empirical evaluations show that FaRM achieves state-of-the-art performance on several datasets, and learned representations leak significantly less protected attribute information against an attack by a non-linear probing network.</abstract>
<identifier type="citekey">chowdhury-chaturvedi-2022-learning</identifier>
<identifier type="doi">10.1162/tacl_a_00512</identifier>
<location>
<url>https://aclanthology.org/2022.tacl-1.67</url>
</location>
<part>
<date>2022</date>
<detail type="volume"><number>10</number></detail>
<extent unit="page">
<start>1159</start>
<end>1174</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Journal Article
%T Learning Fair Representations via Rate-Distortion Maximization
%A Chowdhury, Somnath Basu Roy
%A Chaturvedi, Snigdha
%J Transactions of the Association for Computational Linguistics
%D 2022
%V 10
%I MIT Press
%C Cambridge, MA
%F chowdhury-chaturvedi-2022-learning
%X Text representations learned by machine learning models often encode undesirable demographic information of the user. Predictive models based on these representations can rely on such information, resulting in biased decisions. We present a novel debiasing technique, Fairness-aware Rate Maximization (FaRM), that removes protected information by making representations of instances belonging to the same protected attribute class uncorrelated, using the rate-distortion function. FaRM is able to debias representations with or without a target task at hand. FaRM can also be adapted to remove information about multiple protected attributes simultaneously. Empirical evaluations show that FaRM achieves state-of-the-art performance on several datasets, and learned representations leak significantly less protected attribute information against an attack by a non-linear probing network.
%R 10.1162/tacl_a_00512
%U https://aclanthology.org/2022.tacl-1.67
%U https://doi.org/10.1162/tacl_a_00512
%P 1159-1174
Markdown (Informal)
[Learning Fair Representations via Rate-Distortion Maximization](https://aclanthology.org/2022.tacl-1.67) (Chowdhury & Chaturvedi, TACL 2022)
ACL