Publications
Schütz, Mina; Schindler, Alexander; Siegel, Melanie; Nazemi, Kawa Automatic Fake News Detection with Pre-trained Transformer Models Proceedings Article In: Bimbo, Alberto Del; Cucchiara, Rita; Sclaroff, Stan; Farinella, Giovanni Maria; Mei, Tao; Bertini, Marco; Escalante, Hugo Jair; Vezzani, Roberto (Ed.): Pattern Recognition. ICPR International Workshops and Challenges, pp. 627–641, Springer International Publishing, Cham, 2021, ISBN: 978-3-030-68787-8. Abstract | Links | BibTeX | Tags: Artificial Intelligence, datamining, Decision Making, Fake News, Machine Learning, Transformer | 2021
% NOTE(review): cleaned auto-exported entry — names normalized to "Last, First",
% url junk removed, title acronym brace-protected, keyword typo fixed.
@inproceedings{10.1007/978-3-030-68787-8_45,
  title     = {Automatic Fake News Detection with Pre-trained {Transformer} Models},
  author    = {Schütz, Mina and Schindler, Alexander and Siegel, Melanie and Nazemi, Kawa},
  editor    = {Del Bimbo, Alberto and Cucchiara, Rita and Sclaroff, Stan and Farinella, Giovanni Maria and Mei, Tao and Bertini, Marco and Escalante, Hugo Jair and Vezzani, Roberto},
  url       = {https://link.springer.com/chapter/10.1007/978-3-030-68787-8_45},
  doi       = {10.1007/978-3-030-68787-8_45},
  isbn      = {978-3-030-68787-8},
  year      = {2021},
  date      = {2021-02-21},
  booktitle = {Pattern Recognition. {ICPR} International Workshops and Challenges},
  pages     = {627--641},
  publisher = {Springer International Publishing},
  address   = {Cham},
  abstract  = {The automatic detection of disinformation and misinformation has gained attention during the last years, since fake news has a critical impact on democracy, society, and journalism and digital literacy. In this paper, we present a binary content-based classification approach for detecting fake news automatically, with several recently published pre-trained language models based on the Transformer architecture. The experiments were conducted on the FakeNewsNet dataset with XLNet, BERT, RoBERTa, DistilBERT, and ALBERT and various combinations of hyperparameters. Different preprocessing steps were carried out with only using the body text, the titles and a concatenation of both. It is concluded that Transformers are a promising approach to detect fake news, since they achieve notable results, even without using a large dataset. Our main contribution is the enhancement of fake news' detection accuracy through different models and parametrizations with a reproducible result examination through the conducted experiments. The evaluation shows that already short texts are enough to attain 85% accuracy on the test set. Using the body text and a concatenation of both reach up to 87% accuracy. Lastly, we show that various preprocessing steps, such as removing outliers, do not have a significant impact on the models prediction output.},
  keywords  = {Artificial Intelligence, datamining, Decision Making, Fake News, Machine Learning, Transformer},
  pubstate  = {published},
  tppubtype = {inproceedings}
}