@inproceedings{thorn-jakobsen-etal-2021-spurious,
    title = "Spurious Correlations in Cross-Topic Argument Mining",
    author = "Thorn Jakobsen, Terne Sasha and
      Barrett, Maria and
      S{\o}gaard, Anders",
    editor = "Ku, Lun-Wei and
      Nastase, Vivi and
      Vuli{\'c}, Ivan",
    booktitle = "Proceedings of *SEM 2021: The Tenth Joint Conference on Lexical and Computational Semantics",
    month = aug,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.starsem-1.25",
    doi = "10.18653/v1/2021.starsem-1.25",
    pages = "263--277",
    abstract = "Recent work in cross-topic argument mining attempts to learn models that generalise across topics rather than merely relying on within-topic spurious correlations. We examine the effectiveness of this approach by analysing the output of single-task and multi-task models for cross-topic argument mining, through a combination of linear approximations of their decision boundaries, manual feature grouping, challenge examples, and ablations across the input vocabulary. Surprisingly, we show that cross-topic models still rely mostly on spurious correlations and only generalise within closely related topics, e.g., a model trained only on closed-class words and a few common open-class words outperforms a state-of-the-art cross-topic model on distant target topics.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="thorn-jakobsen-etal-2021-spurious">
<titleInfo>
<title>Spurious Correlations in Cross-Topic Argument Mining</title>
</titleInfo>
<name type="personal">
<namePart type="given">Terne</namePart>
<namePart type="given">Sasha</namePart>
<namePart type="family">Thorn Jakobsen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Maria</namePart>
<namePart type="family">Barrett</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anders</namePart>
<namePart type="family">Søgaard</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2021-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of *SEM 2021: The Tenth Joint Conference on Lexical and Computational Semantics</title>
</titleInfo>
<name type="personal">
<namePart type="given">Lun-Wei</namePart>
<namePart type="family">Ku</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Vivi</namePart>
<namePart type="family">Nastase</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Ivan</namePart>
<namePart type="family">Vulić</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Recent work in cross-topic argument mining attempts to learn models that generalise across topics rather than merely relying on within-topic spurious correlations. We examine the effectiveness of this approach by analysing the output of single-task and multi-task models for cross-topic argument mining, through a combination of linear approximations of their decision boundaries, manual feature grouping, challenge examples, and ablations across the input vocabulary. Surprisingly, we show that cross-topic models still rely mostly on spurious correlations and only generalise within closely related topics, e.g., a model trained only on closed-class words and a few common open-class words outperforms a state-of-the-art cross-topic model on distant target topics.</abstract>
<identifier type="citekey">thorn-jakobsen-etal-2021-spurious</identifier>
<identifier type="doi">10.18653/v1/2021.starsem-1.25</identifier>
<location>
<url>https://aclanthology.org/2021.starsem-1.25</url>
</location>
<part>
<date>2021-08</date>
<extent unit="page">
<start>263</start>
<end>277</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Spurious Correlations in Cross-Topic Argument Mining
%A Thorn Jakobsen, Terne Sasha
%A Barrett, Maria
%A Søgaard, Anders
%Y Ku, Lun-Wei
%Y Nastase, Vivi
%Y Vulić, Ivan
%S Proceedings of *SEM 2021: The Tenth Joint Conference on Lexical and Computational Semantics
%D 2021
%8 August
%I Association for Computational Linguistics
%C Online
%F thorn-jakobsen-etal-2021-spurious
%X Recent work in cross-topic argument mining attempts to learn models that generalise across topics rather than merely relying on within-topic spurious correlations. We examine the effectiveness of this approach by analysing the output of single-task and multi-task models for cross-topic argument mining, through a combination of linear approximations of their decision boundaries, manual feature grouping, challenge examples, and ablations across the input vocabulary. Surprisingly, we show that cross-topic models still rely mostly on spurious correlations and only generalise within closely related topics, e.g., a model trained only on closed-class words and a few common open-class words outperforms a state-of-the-art cross-topic model on distant target topics.
%R 10.18653/v1/2021.starsem-1.25
%U https://aclanthology.org/2021.starsem-1.25
%U https://doi.org/10.18653/v1/2021.starsem-1.25
%P 263-277
Markdown (Informal)
[Spurious Correlations in Cross-Topic Argument Mining](https://aclanthology.org/2021.starsem-1.25) (Thorn Jakobsen et al., *SEM 2021)
ACL
Terne Sasha Thorn Jakobsen, Maria Barrett, and Anders Søgaard. 2021. Spurious Correlations in Cross-Topic Argument Mining. In Proceedings of *SEM 2021: The Tenth Joint Conference on Lexical and Computational Semantics, pages 263–277, Online. Association for Computational Linguistics.