@inproceedings{shi-et-al-acmmm-19,
abstract = {With the rapid growth of video data and the increasing demands of applications such as intelligent video search and assistance for visually impaired people, the video captioning task has recently received considerable attention in the computer vision and natural language processing communities. State-of-the-art video captioning methods focus on encoding temporal information but lack effective ways to remove irrelevant temporal information, and they also neglect spatial details. An RNN encoding module that processes a video in a single temporal order can be misled by irrelevant temporal information, especially when it occurs at the beginning of the encoding. In addition, neglecting spatial information leads to confused word relationships and a loss of detail in the generated captions. Therefore, in this paper, we propose a novel recurrent video encoding method and a novel visual spatial feature for the video captioning task. The recurrent encoding module encodes the video twice, guided by a predicted key frame, to avoid the irrelevant temporal information that often occurs at the beginning and end of a video. The novel spatial features represent the spatial information in different regions of a video and enrich the details of a caption. Experiments on two benchmark datasets show the superior performance of the proposed method.},
address = {Nice, France},
author = {Xiangxi Shi and Jianfei Cai and Shafiq Joty and Jiuxiang Gu},
booktitle = {Proceedings of the 27th ACM International Conference on Multimedia},
numpages = {10},
pages = {818--826},
publisher = {ACM},
series = {MM '19},
title = {Watch It Twice: Video Captioning with a Refocused Video Encoder},
doi = {10.1145/3343031.3351060},
url = {https://dl.acm.org/doi/10.1145/3343031.3351060},
year = {2019}
}