@inproceedings{goyal-etal-2023-else,
  title     = {What Else Do {I} Need to Know? The Effect of Background Information on Users{'} Reliance on {QA} Systems},
  author    = {Goyal, Navita and
               Briakou, Eleftheria and
               Liu, Amanda and
               Baumler, Connor and
               Bonial, Claire and
               Micher, Jeffrey and
               Voss, Clare and
               Carpuat, Marine and
               Daum{\'e}, III, Hal},
  editor    = {Bouamor, Houda and
               Pino, Juan and
               Bali, Kalika},
  booktitle = {Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing},
  month     = dec,
  year      = {2023},
  address   = {Singapore},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.emnlp-main.201},
  doi       = {10.18653/v1/2023.emnlp-main.201},
  pages     = {3313--3330},
  abstract  = {NLP systems have shown impressive performance at answering questions by retrieving relevant context. However, with the increasingly large models, it is impossible and often undesirable to constrain models{'} knowledge or reasoning to only the retrieved context. This leads to a mismatch between the information that \textit{the models} access to derive the answer and the information that is available to \textit{the user} to assess the model predicted answer. In this work, we study how users interact with QA systems in the absence of sufficient information to assess their predictions. Further, we ask whether adding the requisite background helps mitigate users{'} over-reliance on predictions. Our study reveals that users rely on model predictions even in the absence of sufficient information needed to assess the model{'}s correctness. Providing the relevant background, however, helps users better catch model errors, reducing over-reliance on incorrect predictions. On the flip side, background information also increases users{'} confidence in their accurate as well as inaccurate judgments. Our work highlights that supporting users{'} verification of QA predictions is an important, yet challenging, problem.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="goyal-etal-2023-else">
<titleInfo>
<title>What Else Do I Need to Know? The Effect of Background Information on Users’ Reliance on QA Systems</title>
</titleInfo>
<name type="personal">
<namePart type="given">Navita</namePart>
<namePart type="family">Goyal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Eleftheria</namePart>
<namePart type="family">Briakou</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Amanda</namePart>
<namePart type="family">Liu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Connor</namePart>
<namePart type="family">Baumler</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Claire</namePart>
<namePart type="family">Bonial</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jeffrey</namePart>
<namePart type="family">Micher</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Clare</namePart>
<namePart type="family">Voss</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Marine</namePart>
<namePart type="family">Carpuat</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hal</namePart>
<namePart type="family">Daumé III</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-12</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing</title>
</titleInfo>
<name type="personal">
<namePart type="given">Houda</namePart>
<namePart type="family">Bouamor</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Juan</namePart>
<namePart type="family">Pino</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kalika</namePart>
<namePart type="family">Bali</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Singapore</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>NLP systems have shown impressive performance at answering questions by retrieving relevant context. However, with the increasingly large models, it is impossible and often undesirable to constrain models’ knowledge or reasoning to only the retrieved context. This leads to a mismatch between the information that the models access to derive the answer and the information that is available to the user to assess the model predicted answer. In this work, we study how users interact with QA systems in the absence of sufficient information to assess their predictions. Further, we ask whether adding the requisite background helps mitigate users’ over-reliance on predictions. Our study reveals that users rely on model predictions even in the absence of sufficient information needed to assess the model’s correctness. Providing the relevant background, however, helps users better catch model errors, reducing over-reliance on incorrect predictions. On the flip side, background information also increases users’ confidence in their accurate as well as inaccurate judgments. Our work highlights that supporting users’ verification of QA predictions is an important, yet challenging, problem.</abstract>
<identifier type="citekey">goyal-etal-2023-else</identifier>
<identifier type="doi">10.18653/v1/2023.emnlp-main.201</identifier>
<location>
<url>https://aclanthology.org/2023.emnlp-main.201</url>
</location>
<part>
<date>2023-12</date>
<extent unit="page">
<start>3313</start>
<end>3330</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T What Else Do I Need to Know? The Effect of Background Information on Users’ Reliance on QA Systems
%A Goyal, Navita
%A Briakou, Eleftheria
%A Liu, Amanda
%A Baumler, Connor
%A Bonial, Claire
%A Micher, Jeffrey
%A Voss, Clare
%A Carpuat, Marine
%A Daumé III, Hal
%Y Bouamor, Houda
%Y Pino, Juan
%Y Bali, Kalika
%S Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing
%D 2023
%8 December
%I Association for Computational Linguistics
%C Singapore
%F goyal-etal-2023-else
%X NLP systems have shown impressive performance at answering questions by retrieving relevant context. However, with the increasingly large models, it is impossible and often undesirable to constrain models’ knowledge or reasoning to only the retrieved context. This leads to a mismatch between the information that the models access to derive the answer and the information that is available to the user to assess the model predicted answer. In this work, we study how users interact with QA systems in the absence of sufficient information to assess their predictions. Further, we ask whether adding the requisite background helps mitigate users’ over-reliance on predictions. Our study reveals that users rely on model predictions even in the absence of sufficient information needed to assess the model’s correctness. Providing the relevant background, however, helps users better catch model errors, reducing over-reliance on incorrect predictions. On the flip side, background information also increases users’ confidence in their accurate as well as inaccurate judgments. Our work highlights that supporting users’ verification of QA predictions is an important, yet challenging, problem.
%R 10.18653/v1/2023.emnlp-main.201
%U https://aclanthology.org/2023.emnlp-main.201
%U https://doi.org/10.18653/v1/2023.emnlp-main.201
%P 3313-3330
Markdown (Informal)
[What Else Do I Need to Know? The Effect of Background Information on Users’ Reliance on QA Systems](https://aclanthology.org/2023.emnlp-main.201) (Goyal et al., EMNLP 2023)
ACL
- Navita Goyal, Eleftheria Briakou, Amanda Liu, Connor Baumler, Claire Bonial, Jeffrey Micher, Clare Voss, Marine Carpuat, and Hal Daumé III. 2023. What Else Do I Need to Know? The Effect of Background Information on Users’ Reliance on QA Systems. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 3313–3330, Singapore. Association for Computational Linguistics.