@inproceedings{Tao-emnlp-21,
abstract = {Large-scale multilingual pre-trained language models have shown promising results in zero- and few-shot cross-lingual tasks. However, recent studies have shown their lack of generalizability when the languages are structurally dissimilar. In this work, we propose a novel fine-tuning method based on co-training that aims to learn more generalized semantic equivalences, complementary to multilingual language modeling, using the unlabeled data in the target language. We also propose an adaptation method based on contrastive learning to better capture the semantic relationship in the parallel data, when a few translation pairs are available. To show our method's effectiveness, we conduct extensive experiments on cross-lingual inference and review classification tasks across various languages. We report significant gains compared to directly fine-tuning multilingual pre-trained models and other semi-supervised alternatives.},
address = {Online and Punta Cana, Dominican Republic},
author = {Yu, Tao and Joty, Shafiq},
booktitle = {Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing},
publisher = {Association for Computational Linguistics},
series = {EMNLP'21},
title = {Effective Fine-tuning Methods for Cross-lingual Adaptation},
url = {https://aclanthology.org/2021.emnlp-main.668/},
year = {2021}
}