@inproceedings{samson-naacl-21,
abstract = {Multilingual models have demonstrated impressive cross-lingual transfer performance. However, test sets like XNLI are monolingual at the example level. In multilingual communities, it is common for polyglots to code-mix when conversing with each other. Inspired by this phenomenon, we present two strong black-box adversarial attacks (one word-level, one phrase-level) for multilingual models that push their ability to handle code-mixed sentences to the limit. The former uses bilingual dictionaries to propose perturbations and translations of the clean example for sense disambiguation. The latter directly aligns the clean example with its translations before extracting phrases as perturbations. Our phrase-level attack has a success rate of 89.75\% against XLM-R$_\text{large}$, bringing its average accuracy of 79.85 down to 8.18 on XNLI. Finally, we propose an efficient adversarial training scheme that trains in the same number of steps as the original model and show that it improves model accuracy.},
address = {Online},
author = {Samson Tan and Shafiq Joty},
booktitle = {Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies},
numpages = {9},
pages = {xx--xx},
publisher = {Association for Computational Linguistics},
series = {NAACL'21},
title = {Code-Mixing on Sesame Street: Dawn of the Adversarial Polyglots},
url = {https://aclanthology.org/2021.naacl-main.282.pdf},
year = {2021}
}