@article{ben-david-etal-2020-perl,
title = "{PERL}: Pivot-based Domain Adaptation for Pre-trained Deep Contextualized Embedding Models",
author = "Ben-David, Eyal and
Rabinovitz, Carmel and
Reichart, Roi",
editor = "Johnson, Mark and
Roark, Brian and
Nenkova, Ani",
journal = "Transactions of the Association for Computational Linguistics",
volume = "8",
year = "2020",
address = "Cambridge, MA",
publisher = "MIT Press",
url = "https://aclanthology.org/2020.tacl-1.33",
doi = "10.1162/tacl_a_00328",
pages = "504--521",
    abstract = "Pivot-based neural representation models have led to significant progress in domain adaptation for NLP. However, previous research following this approach utilizes only labeled data from the source domain and unlabeled data from the source and target domains, but neglects to incorporate massive unlabeled corpora that are not necessarily drawn from these domains. To alleviate this, we propose PERL: a representation learning model that extends contextualized word embedding models such as BERT (Devlin et al., 2019) with pivot-based fine-tuning. PERL outperforms strong baselines across 22 sentiment classification domain adaptation setups, improves in-domain model performance, yields effective reduced-size models, and increases model stability.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="ben-david-etal-2020-perl">
<titleInfo>
<title>PERL: Pivot-based Domain Adaptation for Pre-trained Deep Contextualized Embedding Models</title>
</titleInfo>
<name type="personal">
<namePart type="given">Eyal</namePart>
<namePart type="family">Ben-David</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Carmel</namePart>
<namePart type="family">Rabinovitz</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Roi</namePart>
<namePart type="family">Reichart</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<genre authority="bibutilsgt">journal article</genre>
<relatedItem type="host">
<titleInfo>
<title>Transactions of the Association for Computational Linguistics</title>
</titleInfo>
<originInfo>
<issuance>continuing</issuance>
<publisher>MIT Press</publisher>
<place>
<placeTerm type="text">Cambridge, MA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">periodical</genre>
<genre authority="bibutilsgt">academic journal</genre>
</relatedItem>
<abstract>Pivot-based neural representation models have led to significant progress in domain adaptation for NLP. However, previous research following this approach utilizes only labeled data from the source domain and unlabeled data from the source and target domains, but neglects to incorporate massive unlabeled corpora that are not necessarily drawn from these domains. To alleviate this, we propose PERL: a representation learning model that extends contextualized word embedding models such as BERT (Devlin et al., 2019) with pivot-based fine-tuning. PERL outperforms strong baselines across 22 sentiment classification domain adaptation setups, improves in-domain model performance, yields effective reduced-size models, and increases model stability.</abstract>
<identifier type="citekey">ben-david-etal-2020-perl</identifier>
<identifier type="doi">10.1162/tacl_a_00328</identifier>
<location>
<url>https://aclanthology.org/2020.tacl-1.33</url>
</location>
<part>
<date>2020</date>
<detail type="volume"><number>8</number></detail>
<extent unit="page">
<start>504</start>
<end>521</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Journal Article
%T PERL: Pivot-based Domain Adaptation for Pre-trained Deep Contextualized Embedding Models
%A Ben-David, Eyal
%A Rabinovitz, Carmel
%A Reichart, Roi
%J Transactions of the Association for Computational Linguistics
%D 2020
%V 8
%I MIT Press
%C Cambridge, MA
%F ben-david-etal-2020-perl
%X Pivot-based neural representation models have led to significant progress in domain adaptation for NLP. However, previous research following this approach utilizes only labeled data from the source domain and unlabeled data from the source and target domains, but neglects to incorporate massive unlabeled corpora that are not necessarily drawn from these domains. To alleviate this, we propose PERL: a representation learning model that extends contextualized word embedding models such as BERT (Devlin et al., 2019) with pivot-based fine-tuning. PERL outperforms strong baselines across 22 sentiment classification domain adaptation setups, improves in-domain model performance, yields effective reduced-size models, and increases model stability.
%R 10.1162/tacl_a_00328
%U https://aclanthology.org/2020.tacl-1.33
%U https://doi.org/10.1162/tacl_a_00328
%P 504-521
Markdown (Informal)
[PERL: Pivot-based Domain Adaptation for Pre-trained Deep Contextualized Embedding Models](https://aclanthology.org/2020.tacl-1.33) (Ben-David et al., TACL 2020)
ACL
Eyal Ben-David, Carmel Rabinovitz, and Roi Reichart. 2020. PERL: Pivot-based Domain Adaptation for Pre-trained Deep Contextualized Embedding Models. Transactions of the Association for Computational Linguistics, 8:504–521.
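
The abstract above centers on pivot features: units that are frequent in both the source and target domains and predictive of the task label in the source domain. Below is a minimal, hypothetical sketch of that pivot-selection criterion, assuming scikit-learn; the function `select_pivots` and its parameters `num_pivots` and `min_df` are illustrative names, and this is not the authors' implementation.

```python
# Hypothetical sketch of pivot selection for pivot-based domain adaptation:
# keep features that appear often in BOTH domains, then rank them by mutual
# information with the source-domain labels. Illustration only.
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import mutual_info_classif

def select_pivots(source_texts, source_labels, target_texts,
                  num_pivots=100, min_df=10):
    # One shared vocabulary over both domains so feature indices align.
    vectorizer = CountVectorizer(ngram_range=(1, 2), binary=True)
    X_all = vectorizer.fit_transform(source_texts + target_texts)
    X_src = X_all[:len(source_texts)]
    X_tgt = X_all[len(source_texts):]

    # Frequency filter: a pivot must occur in enough documents of each domain.
    src_df = np.asarray((X_src > 0).sum(axis=0)).ravel()
    tgt_df = np.asarray((X_tgt > 0).sum(axis=0)).ravel()
    frequent = (src_df >= min_df) & (tgt_df >= min_df)

    # Rank the frequent features by mutual information with source labels.
    mi = mutual_info_classif(X_src, source_labels, discrete_features=True)
    mi[~frequent] = -np.inf
    pivot_idx = np.argsort(mi)[::-1][:num_pivots]

    vocab = np.array(vectorizer.get_feature_names_out())
    return vocab[pivot_idx].tolist()

# Usage: pivots = select_pivots(src_texts, src_labels, tgt_texts)
```

Per the abstract, PERL then uses such pivots to drive a pivot-based fine-tuning step on top of a contextualized encoder like BERT; the sketch covers only the selection stage.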