@inproceedings{lin-etal-2018-self,
title = "Self-training improves Recurrent Neural Networks performance for Temporal Relation Extraction",
author = "Lin, Chen and
Miller, Timothy and
Dligach, Dmitriy and
Amiri, Hadi and
Bethard, Steven and
Savova, Guergana",
editor = "Lavelli, Alberto and
Minard, Anne-Lyse and
Rinaldi, Fabio",
booktitle = "Proceedings of the Ninth International Workshop on Health Text Mining and Information Analysis",
month = oct,
year = "2018",
address = "Brussels, Belgium",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W18-5619",
doi = "10.18653/v1/W18-5619",
pages = "165--176",
abstract = "Neural network models are oftentimes restricted by limited labeled instances and resort to advanced architectures and features for cutting edge performance. We propose to build a recurrent neural network with multiple semantically heterogeneous embeddings within a self-training framework. Our framework makes use of labeled, unlabeled, and social media data, operates on basic features, and is scalable and generalizable. With this method, we establish the state-of-the-art result for both in- and cross-domain for a clinical temporal relation extraction task.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="lin-etal-2018-self">
<titleInfo>
<title>Self-training improves Recurrent Neural Networks performance for Temporal Relation Extraction</title>
</titleInfo>
<name type="personal">
<namePart type="given">Chen</namePart>
<namePart type="family">Lin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Timothy</namePart>
<namePart type="family">Miller</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dmitriy</namePart>
<namePart type="family">Dligach</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hadi</namePart>
<namePart type="family">Amiri</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Steven</namePart>
<namePart type="family">Bethard</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Guergana</namePart>
<namePart type="family">Savova</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-10</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Ninth International Workshop on Health Text Mining and Information Analysis</title>
</titleInfo>
<name type="personal">
<namePart type="given">Alberto</namePart>
<namePart type="family">Lavelli</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Anne-Lyse</namePart>
<namePart type="family">Minard</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Fabio</namePart>
<namePart type="family">Rinaldi</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Brussels, Belgium</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Neural network models are oftentimes restricted by limited labeled instances and resort to advanced architectures and features for cutting edge performance. We propose to build a recurrent neural network with multiple semantically heterogeneous embeddings within a self-training framework. Our framework makes use of labeled, unlabeled, and social media data, operates on basic features, and is scalable and generalizable. With this method, we establish the state-of-the-art result for both in- and cross-domain for a clinical temporal relation extraction task.</abstract>
<identifier type="citekey">lin-etal-2018-self</identifier>
<identifier type="doi">10.18653/v1/W18-5619</identifier>
<location>
<url>https://aclanthology.org/W18-5619</url>
</location>
<part>
<date>2018-10</date>
<extent unit="page">
<start>165</start>
<end>176</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Self-training improves Recurrent Neural Networks performance for Temporal Relation Extraction
%A Lin, Chen
%A Miller, Timothy
%A Dligach, Dmitriy
%A Amiri, Hadi
%A Bethard, Steven
%A Savova, Guergana
%Y Lavelli, Alberto
%Y Minard, Anne-Lyse
%Y Rinaldi, Fabio
%S Proceedings of the Ninth International Workshop on Health Text Mining and Information Analysis
%D 2018
%8 October
%I Association for Computational Linguistics
%C Brussels, Belgium
%F lin-etal-2018-self
%X Neural network models are oftentimes restricted by limited labeled instances and resort to advanced architectures and features for cutting edge performance. We propose to build a recurrent neural network with multiple semantically heterogeneous embeddings within a self-training framework. Our framework makes use of labeled, unlabeled, and social media data, operates on basic features, and is scalable and generalizable. With this method, we establish the state-of-the-art result for both in- and cross-domain for a clinical temporal relation extraction task.
%R 10.18653/v1/W18-5619
%U https://aclanthology.org/W18-5619
%U https://doi.org/10.18653/v1/W18-5619
%P 165-176
Markdown (Informal)
[Self-training improves Recurrent Neural Networks performance for Temporal Relation Extraction](https://aclanthology.org/W18-5619) (Lin et al., Louhi 2018)
ACL
Chen Lin, Timothy Miller, Dmitriy Dligach, Hadi Amiri, Steven Bethard, and Guergana Savova. 2018. [Self-training improves Recurrent Neural Networks performance for Temporal Relation Extraction](https://aclanthology.org/W18-5619). In *Proceedings of the Ninth International Workshop on Health Text Mining and Information Analysis*, pages 165–176, Brussels, Belgium. Association for Computational Linguistics.