1

Below is the contents of my .bib file:

@paper{Pinter2019,
  title = {Attention is not not explanation},
  author = {Wiegreffe, S. and Pinter, Y.},
  journal = {Conference on Empirical Methods in Natural Langauge Processing and 9th International Joint Conference on Natural Langauge Processing, arXiv:1908.04626},
  year = {2019},
  url = {https://arxiv.org/abs/1908.04626}
}

@paper{JainAndWallace2019,
  title = {Attention is not explanation},
  author = {Jain, S. and Wallace, B. C.},
  journal = {Annual Conference of the North American Chapter of the Association for Computational Linguistics,  arXiv:1902.10186},
  year = {2019},
  url = {https://arxiv.org/abs/1902.10186}
}

@paper{SurveyOfAttentionModels,
  title = {An Attentive Survey of Attention Models},
  author = {Chaudhari, S., Polatkan, G., Ramanath, R. and Mithal, V.},
  journal = {arXiv:1904.02874},
  year = {2019},
  url = {https://arxiv.org/abs/1904.02874}
}

@paper{Vig2019,
  title = {Analyzing the Structure of Attention in a Transformer Language Model},
  author = {Vig, J., and Belinkov, Y.},
  journal = {Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, DOI: 10.18653/v1/W19-4808},
  year = {2019},
  url = {https://www.aclweb.org/anthology/W19-4808/}
}

@article{GPT22019,
  title = {Language Models are Unsupervised Multitask Learners},
  author = {Radford, A., Wu, J., Child, R., Luan, D., Amodei, D. and Sutskever, I.},
  year = {2019},
  url = {https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf}
}

@paper{TransformerXL,
  title = {Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context},
  author = {Dai, Z., Yang,  Z., Yang, Y., Carbonell, J., Le, Q. V. and Salakhutdinov, R.},
  journal = {Annual Meeting of the Association for Computational Linguistics, arXiv:1901.02860},
  year = {2019},
  url = {https://arxiv.org/abs/1901.02860}
}

@paper{Raganato2018,
  title = {An Analysis of Encoder Representations in Transformer-Based Machine Translation},
  author = {Raganato, A., and Tiedemann, J.},
  journal = {Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, DOI: 10.18653/v1/W18-5431},
  year = {2018},
  url = {https://www.aclweb.org/anthology/W18-5431/}
}

@article{BERT,
  title = {BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding},
  author = {Devlin, J., Chang, M-W., Lee, K. and Toutanova, K},
  journal = {arXiv:1810.04805},
  year = {2018},
  url = {https://arxiv.org/abs/1810.04805}
}

@paper{Vaswani2017,
  title = {Attention is all you need},
  author = {Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez,  A. N., Kaiser, L. and Polosukhin, I.},
  journal = {31st Conference on Neural Information Processing Systems (NIPS 2017), arXiv:1706.03762},
  year = {2017},
  url = {https://arxiv.org/abs/1706.03762}
}

@paper{Multimedia,
  title = {Describing multimedia content using attention-based encoder-decoder networks},
  author = {Cho, K., Courville, A. and Bengio, Y.},
  journal = {IEEE Transactions on Multimedia, 17(11):1875–1886},
  year = {2015},
  url = {https://arxiv.org/pdf/1507.01053.pdf}
}

when I save the above file as bibliography.bib and use it for my references, the system throws an error that says:

Error reading bibliography ./bibliography.bib (line 10, column 7):
unexpected "\\"
expecting letter, white space or "{"
Error running filter /Applications/RStudio.app/Contents/MacOS/pandoc/pandoc-citeproc:
Filter returned error status 1
Error: pandoc document conversion failed with error 83

Why is this error popping up?

Thank you,

chico0913
  • 138

1 Answer

1

The .bib file is malformed.

There is no @paper entry type in the standard BibTeX styles. Also, the author lists are malformed: each author must be separated from the next with the keyword "and", and there should be no comma before "and".

Here's a fixed version: I used @article instead of @paper, but @misc is probably the better choice for arXiv-only entries.

@article{Pinter2019,
  title = {Attention is not not explanation},
  author = {Wiegreffe, S. and Pinter, Y.},
  journal = {Conference on Empirical Methods in Natural Language Processing and 9th International Joint Conference on Natural Language Processing},
  year = {2019},
  eprint = {1908.04626},
  eprinttype = {arXiv},
  url = {https://arxiv.org/abs/1908.04626}
}

@article{JainAndWallace2019,
  title = {Attention is not explanation},
  author = {Jain, S. and Wallace, B. C.},
  journal = {Annual Conference of the North American Chapter of the Association for Computational Linguistics},
  year = {2019},
  eprint = {1902.10186},
  eprinttype = {arXiv},
  url = {https://arxiv.org/abs/1902.10186}
}

@misc{SurveyOfAttentionModels,
  title = {An Attentive Survey of Attention Models},
  author = {Chaudhari, S. and Polatkan, G. and Ramanath, R. and Mithal, V.},
  year = {2019},
  eprint = {1904.02874},
  eprinttype = {arXiv},
  url = {https://arxiv.org/abs/1904.02874}
}

@article{Vig2019,
  title = {Analyzing the Structure of Attention in a {Transformer} Language Model},
  author = {Vig, J. and Belinkov, Y.},
  journal = {Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP},
  year = {2019},
  doi = {10.18653/v1/W19-4808},
  url = {https://www.aclweb.org/anthology/W19-4808/}
}

@misc{GPT22019,
  title = {Language Models are Unsupervised Multitask Learners},
  author = {Radford, A. and Wu, J. and Child, R. and Luan, D. and Amodei, D. and Sutskever, I.},
  year = {2019},
  url = {https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf}
}

@article{TransformerXL,
  title = {{Transformer-XL}: Attentive Language Models Beyond a Fixed-Length Context},
  author = {Dai, Z. and Yang, Z. and Yang, Y. and Carbonell, J. and Le, Q. V. and Salakhutdinov, R.},
  journal = {Annual Meeting of the Association for Computational Linguistics},
  year = {2019},
  eprint = {1901.02860},
  eprinttype = {arXiv},
  url = {https://arxiv.org/abs/1901.02860}
}

@article{Raganato2018,
  title = {An Analysis of Encoder Representations in {Transformer-Based} Machine Translation},
  author = {Raganato, A. and Tiedemann, J.},
  journal = {Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP},
  year = {2018},
  doi = {10.18653/v1/W18-5431},
  url = {https://www.aclweb.org/anthology/W18-5431/}
}

@misc{BERT,
  title = {{BERT}: Pre-training of Deep Bidirectional Transformers for Language Understanding},
  author = {Devlin, J. and Chang, M.-W. and Lee, K. and Toutanova, K.},
  year = {2018},
  eprint = {1810.04805},
  eprinttype = {arXiv},
  url = {https://arxiv.org/abs/1810.04805}
}

@article{Vaswani2017,
  title = {Attention is all you need},
  author = {Vaswani, A. and Shazeer, N. and Parmar, N. and Uszkoreit, J. and Jones, L. and Gomez, A. N. and Kaiser, L. and Polosukhin, I.},
  journal = {31st Conference on Neural Information Processing Systems (NIPS 2017)},
  year = {2017},
  eprint = {1706.03762},
  eprinttype = {arXiv},
  url = {https://arxiv.org/abs/1706.03762}
}

@article{Multimedia,
  title = {Describing multimedia content using attention-based encoder-decoder networks},
  author = {Cho, K. and Courville, A. and Bengio, Y.},
  journal = {IEEE Transactions on Multimedia},
  volume = {17},
  number = {11},
  pages = {1875--1886},
  year = {2015},
  url = {https://arxiv.org/pdf/1507.01053.pdf}
}
egreg
  • 1,121,712