@inproceedings{nguyen2022umt,
abstract = {Numerous recent work on unsupervised machine translation (UMT) implies that competent unsupervised translations of low-resource and unrelated languages, such as Nepali or Sinhala, are only possible if the model is trained in a massive multilingual environment, where these low-resource languages are mixed with
high-resource counterparts. Nonetheless, while the high-resource languages greatly help kick-start the target low-resource translation tasks, the language discrepancy between them may hinder their further improvement. In this work, we propose a simple refinement procedure to disentangle languages from a pre-trained multilingual UMT model for it to focus on only the target low-resource task. Our method achieves the state of the art in the fully unsupervised translation tasks of English to Nepali, Sinhala, Gujarati, Latvian, Estonian and Kazakh, with BLEU score gains of 3.5, 3.5, 3.3, 4.1, 4.2, and 3.3, respectively. Our codebase is available at anonymous.4open.science/r/fairseq-py-BB44.},
address = {New Orleans, USA},
author = {Xuan-Phi Nguyen and Shafiq Joty and Kui Wu and Ai Ti Aw},
booktitle = {Advances in Neural Information Processing Systems 35 (NeurIPS 2022)},
numpages = {9},
publisher = {Curran Associates, Inc.},
series = {NeurIPS'22},
title = {Refining Low-Resource Unsupervised Translation by Language Disentanglement of Multilingual Translation Model},
url = {https://arxiv.org/abs/2205.15544},
year = {2022}
}