@inproceedings{ilharco-etal-2020-high,
  title     = {High Performance Natural Language Processing},
  author    = {Ilharco, Gabriel and
               Ilharco, Cesar and
               Turc, Iulia and
               Dettmers, Tim and
               Ferreira, Felipe and
               Lee, Kenton},
  editor    = {Villavicencio, Aline and
               Van Durme, Benjamin},
  booktitle = {Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Tutorial Abstracts},
  month     = nov,
  year      = {2020},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2020.emnlp-tutorials.4},
  doi       = {10.18653/v1/2020.emnlp-tutorials.4},
  pages     = {24--27},
  abstract  = {Scale has played a central role in the rapid progress natural language processing has enjoyed in recent years. While benchmarks are dominated by ever larger models, efficient hardware use is critical for their widespread adoption and further progress in the field. In this cutting-edge tutorial, we will recapitulate the state-of-the-art in natural language processing with scale in perspective. After establishing these foundations, we will cover a wide range of techniques for improving efficiency, including knowledge distillation, quantization, pruning, more efficient architectures, along with case studies and practical implementation tricks.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="ilharco-etal-2020-high">
    <titleInfo>
      <title>High Performance Natural Language Processing</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Gabriel</namePart>
      <namePart type="family">Ilharco</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Cesar</namePart>
      <namePart type="family">Ilharco</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Iulia</namePart>
      <namePart type="family">Turc</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Tim</namePart>
      <namePart type="family">Dettmers</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Felipe</namePart>
      <namePart type="family">Ferreira</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Kenton</namePart>
      <namePart type="family">Lee</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Tutorial Abstracts</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Aline</namePart>
        <namePart type="family">Villavicencio</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Benjamin</namePart>
        <namePart type="family">Van Durme</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Scale has played a central role in the rapid progress natural language processing has enjoyed in recent years. While benchmarks are dominated by ever larger models, efficient hardware use is critical for their widespread adoption and further progress in the field. In this cutting-edge tutorial, we will recapitulate the state-of-the-art in natural language processing with scale in perspective. After establishing these foundations, we will cover a wide range of techniques for improving efficiency, including knowledge distillation, quantization, pruning, more efficient architectures, along with case studies and practical implementation tricks.</abstract>
    <identifier type="citekey">ilharco-etal-2020-high</identifier>
    <identifier type="doi">10.18653/v1/2020.emnlp-tutorials.4</identifier>
    <location>
      <url>https://aclanthology.org/2020.emnlp-tutorials.4</url>
    </location>
    <part>
      <date>2020-11</date>
      <extent unit="page">
        <start>24</start>
        <end>27</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T High Performance Natural Language Processing
%A Ilharco, Gabriel
%A Ilharco, Cesar
%A Turc, Iulia
%A Dettmers, Tim
%A Ferreira, Felipe
%A Lee, Kenton
%Y Villavicencio, Aline
%Y Van Durme, Benjamin
%S Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Tutorial Abstracts
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F ilharco-etal-2020-high
%X Scale has played a central role in the rapid progress natural language processing has enjoyed in recent years. While benchmarks are dominated by ever larger models, efficient hardware use is critical for their widespread adoption and further progress in the field. In this cutting-edge tutorial, we will recapitulate the state-of-the-art in natural language processing with scale in perspective. After establishing these foundations, we will cover a wide range of techniques for improving efficiency, including knowledge distillation, quantization, pruning, more efficient architectures, along with case studies and practical implementation tricks.
%R 10.18653/v1/2020.emnlp-tutorials.4
%U https://aclanthology.org/2020.emnlp-tutorials.4
%U https://doi.org/10.18653/v1/2020.emnlp-tutorials.4
%P 24-27
Markdown (Informal)
[High Performance Natural Language Processing](https://aclanthology.org/2020.emnlp-tutorials.4) (Ilharco et al., EMNLP 2020)
ACL
- Gabriel Ilharco, Cesar Ilharco, Iulia Turc, Tim Dettmers, Felipe Ferreira, and Kenton Lee. 2020. High Performance Natural Language Processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Tutorial Abstracts, pages 24–27, Online. Association for Computational Linguistics.