@article{Pryzant_DiehlMartinez_Dass_Kurohashi_Jurafsky_Yang_2020,
  title        = {Automatically Neutralizing Subjective Bias in Text},
  author       = {Pryzant, Reid and Diehl Martinez, Richard and Dass, Nathan and Kurohashi, Sadao and Jurafsky, Dan and Yang, Diyi},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {34},
  number       = {01},
  pages        = {480--489},
  year         = {2020},
  month        = apr,
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/5385},
  doi          = {10.1609/aaai.v34i01.5385},
  abstractNote = {Texts like news, encyclopedias, and some social media strive for objectivity. Yet bias in the form of inappropriate subjectivity — introducing attitudes via framing, presupposing truth, and casting doubt — remains ubiquitous. This kind of bias erodes our collective trust and fuels social conflict. To address this issue, we introduce a novel testbed for natural language generation: automatically bringing inappropriately subjective text into a neutral point of view (``neutralizing'' biased text). We also offer the first parallel corpus of biased language. The corpus contains 180,000 sentence pairs and originates from Wikipedia edits that removed various framings, presuppositions, and attitudes from biased sentences. Last, we propose two strong encoder-decoder baselines for the task. A straightforward yet opaque concurrent system uses a BERT encoder to identify subjective words as part of the generation process. An interpretable and controllable modular algorithm separates these steps, using (1) a BERT-based classifier to identify problematic words and (2) a novel \emph{join embedding} through which the classifier can edit the hidden states of the encoder. Large-scale human evaluation across four domains (encyclopedias, news headlines, books, and political speeches) suggests that these algorithms are a first step towards the automatic identification and reduction of bias.}
}