@inproceedings{linlin-et-al-acl-21,
abstract = {Named Entity Recognition (NER) for low-resource languages is both a practical and challenging research problem. This paper addresses zero-shot transfer for cross-lingual NER, especially when the amount of source-language training data is also limited. The paper first proposes a simple but effective labeled sequence translation method that translates source-language training data to target languages while avoiding problems such as word order change and entity span determination. With the source-language data as well as the translated data, a generation-based multilingual data augmentation method is introduced to further increase diversity by generating synthetic labeled data in multiple languages. These augmented data enable the language-model-based NER models to generalize better with both the language-specific features from the target-language synthetic data and the language-independent features from the multilingual synthetic data. An extensive set of experiments was conducted to demonstrate encouraging cross-lingual transfer performance of the new research on a wide variety of target languages. The code and data in this work will be made public for the research community.},
address = {Online},
author = {Linlin Liu and Bosheng Ding and Lidong Bing and Shafiq Joty and Luo Si and Chunyan Miao},
booktitle = {Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics},
numpages = {9},
pages = {xx--xx},
publisher = {Association for Computational Linguistics},
series = {ACL'21},
title = {MulDA: A Multilingual Data Augmentation Framework for Low-Resource Cross-Lingual NER},
url = {https://aclanthology.org/2021.acl-long.453/},
year = {2021}
}