@inproceedings{Media2Face_SIG2024,
  author    = {Zhao, Qingcheng and Long, Pengyu and Zhang, Qixuan and Qin, Dafei and Liang, Han and Zhang, Longwen and Zhang, Yingliang and Yu, Jingyi and Xu, Lan},
  title     = {Media2Face: Co-speech Facial Animation Generation With Multi-Modality Guidance},
  booktitle = {ACM SIGGRAPH 2024 Conference Papers},
  series    = {SIGGRAPH '24},
  year      = {2024},
  isbn      = {9798400705250},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  location  = {Denver, CO, USA},
  url       = {https://doi.org/10.1145/3641519.3657413},
  doi       = {10.1145/3641519.3657413},
  articleno = {18},
  numpages  = {13},
  keywords  = {Audio-driven Animation, Computer Graphics, Computer Vision, Deep Learning, Facial Animation},
  abstract  = {The synthesis of 3D facial animations from speech has garnered considerable attention. Due to the scarcity of high-quality 4D facial data and well-annotated abundant multi-modality labels, previous methods often suffer from limited realism and a lack of flexible conditioning. We address this challenge through a trilogy. We first introduce Generalized Neural Parametric Facial Asset (GNPFA), an efficient variational auto-encoder mapping facial geometry and images to a highly generalized expression latent space, decoupling expressions and identities. Then, we utilize GNPFA to extract high-quality expressions and accurate head poses from a large array of videos. This presents the M2F-D dataset, a large, diverse, and scan-level co-speech 3D facial animation dataset with well-annotated emotion and style labels. Finally, we propose Media2Face, a diffusion model in GNPFA latent space for co-speech facial animation generation, accepting rich multi-modality guidances from audio, text, and image. Extensive experiments demonstrate that our model not only achieves high fidelity in facial animation synthesis but also broadens the scope of expressiveness and style adaptability in 3D facial animation.},
}