@article{Ni-et-al-TACL,
abstract = {Recently, large language models (LLMs),
especially those that are pretrained on code, have demonstrated strong capabilities in generating programs from natural language inputs in a few-shot or even zero-shot manner. Despite promising results, there is a notable lack of a comprehensive evaluation of these models’ language-to-code generation capabilities. Existing studies often focus on specific tasks, model architectures, or learning paradigms, leading to a fragmented understanding of the overall landscape. In this work, we present L2CEval, a systematic evaluation of the language-to-code generation capabilities of LLMs on 7 tasks across the domain spectrum of semantic parsing, math reasoning, and Python programming, analyzing the factors that potentially affect their performance, such as model size, pretraining data, instruction tuning, and different prompting methods. In addition to assessing model performance, we measure confidence calibration for the models and conduct human evaluations of the output programs. This enables us to identify and analyze the typical failure modes across various tasks and models. L2CEval offers a comprehensive understanding of the capabilities and limitations of LLMs in language-to-code generation. We also release the evaluation framework and all model outputs, hoping to lay the groundwork for future research in this domain.},
author = {Ansong Ni and Pengcheng Yin and Yilun Zhao and Martin Riddell and Troy Feng and Rui Shen and Stephen Yin and Ye Liu and Semih Yavuz and Caiming Xiong and Shafiq Joty and Yingbo Zhou and Dragomir Radev and Arman Cohan},
journal = {Transactions of the Association for Computational Linguistics (TACL)},
title = {{L2CEval: Evaluating Language-to-Code Generation Capabilities of Large Language Models}},
url = {https://arxiv.org/pdf/2309.17446v2.pdf},
year = {2024}
}