@inproceedings{torabi-asr-jones-2017-artificial,
title = "An Artificial Language Evaluation of Distributional Semantic Models",
author = "Torabi Asr, Fatemeh and
Jones, Michael",
editor = "Levy, Roger and
Specia, Lucia",
booktitle = "Proceedings of the 21st Conference on Computational Natural Language Learning ({C}o{NLL} 2017)",
month = aug,
year = "2017",
address = "Vancouver, Canada",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/K17-1015",
doi = "10.18653/v1/K17-1015",
pages = "134--142",
abstract = "Recent studies of distributional semantic models have set up a competition between word embeddings obtained from predictive neural networks and word vectors obtained from abstractive count-based models. This paper is an attempt to reveal the underlying contribution of additional training data and post-processing steps on each type of model in word similarity and relatedness inference tasks. We do so by designing an artificial language framework, training a predictive and a count-based model on data sampled from this grammar, and evaluating the resulting word vectors in paradigmatic and syntagmatic tasks defined with respect to the grammar.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="torabi-asr-jones-2017-artificial">
<titleInfo>
<title>An Artificial Language Evaluation of Distributional Semantic Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Fatemeh</namePart>
<namePart type="family">Torabi Asr</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Michael</namePart>
<namePart type="family">Jones</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2017-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 21st Conference on Computational Natural Language Learning (CoNLL 2017)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Roger</namePart>
<namePart type="family">Levy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Lucia</namePart>
<namePart type="family">Specia</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Vancouver, Canada</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Recent studies of distributional semantic models have set up a competition between word embeddings obtained from predictive neural networks and word vectors obtained from abstractive count-based models. This paper is an attempt to reveal the underlying contribution of additional training data and post-processing steps on each type of model in word similarity and relatedness inference tasks. We do so by designing an artificial language framework, training a predictive and a count-based model on data sampled from this grammar, and evaluating the resulting word vectors in paradigmatic and syntagmatic tasks defined with respect to the grammar.</abstract>
<identifier type="citekey">torabi-asr-jones-2017-artificial</identifier>
<identifier type="doi">10.18653/v1/K17-1015</identifier>
<location>
<url>https://aclanthology.org/K17-1015</url>
</location>
<part>
<date>2017-08</date>
<extent unit="page">
<start>134</start>
<end>142</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T An Artificial Language Evaluation of Distributional Semantic Models
%A Torabi Asr, Fatemeh
%A Jones, Michael
%Y Levy, Roger
%Y Specia, Lucia
%S Proceedings of the 21st Conference on Computational Natural Language Learning (CoNLL 2017)
%D 2017
%8 August
%I Association for Computational Linguistics
%C Vancouver, Canada
%F torabi-asr-jones-2017-artificial
%X Recent studies of distributional semantic models have set up a competition between word embeddings obtained from predictive neural networks and word vectors obtained from abstractive count-based models. This paper is an attempt to reveal the underlying contribution of additional training data and post-processing steps on each type of model in word similarity and relatedness inference tasks. We do so by designing an artificial language framework, training a predictive and a count-based model on data sampled from this grammar, and evaluating the resulting word vectors in paradigmatic and syntagmatic tasks defined with respect to the grammar.
%R 10.18653/v1/K17-1015
%U https://aclanthology.org/K17-1015
%U https://doi.org/10.18653/v1/K17-1015
%P 134-142
Markdown (Informal)
[An Artificial Language Evaluation of Distributional Semantic Models](https://aclanthology.org/K17-1015) (Torabi Asr & Jones, CoNLL 2017)
ACL
- Fatemeh Torabi Asr and Michael Jones. 2017. [An Artificial Language Evaluation of Distributional Semantic Models](https://aclanthology.org/K17-1015). In *Proceedings of the 21st Conference on Computational Natural Language Learning (CoNLL 2017)*, pages 134–142, Vancouver, Canada. Association for Computational Linguistics.