We present TALE, a novel training-free framework that harnesses the power of text-driven diffusion models to tackle the cross-domain image composition task, which aims to seamlessly incorporate user-provided objects into a given visual context regardless of domain disparity. Previous methods often involve either training auxiliary networks or finetuning diffusion models on customized datasets, both of which are expensive and may undermine the robust textual and visual priors of pretrained diffusion models. Some recent works attempt to break this barrier with training-free workarounds that manipulate attention maps to tame the denoising process implicitly. However, composing via attention maps does not necessarily yield the desired compositional outcomes: these approaches retain only some semantic information and usually either fall short of preserving the identity characteristics of input objects or exhibit limited background-object style adaptation in the generated images. In contrast, TALE operates directly on the latent space to provide explicit and effective guidance for the composition process, resolving these problems. Specifically, we equip TALE with two mechanisms, dubbed Adaptive Latent Manipulation and Energy-guided Latent Optimization. The former formulates noisy latents conducive to initiating and steering the composition process by directly leveraging background and foreground latents at corresponding timesteps; the latter exploits designated energy functions to further optimize intermediate latents so that they conform to specific conditions, complementing the former to generate the desired final results. Our experiments demonstrate that TALE surpasses prior baselines and attains state-of-the-art performance in image-guided composition across various photorealistic and artistic domains.
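To make the two mechanisms concrete, the snippet below gives a minimal PyTorch sketch of how they might slot into a standard diffusion denoising loop. Every name and hyperparameter in it, including blend_latents, energy_step, the mask convention, and the step size, is an illustrative assumption on our part rather than the authors' released implementation.

import torch

# Illustrative sketch only: all names and hyperparameters below are
# hypothetical stand-ins, not the authors' released code.

def blend_latents(bg_latent, fg_latent, mask, t, t_start):
    # Adaptive Latent Manipulation (sketch): from timestep t_start onward
    # (timesteps count down during denoising), paste the noisy foreground
    # latent into the noisy background latent inside the user-given mask.
    if t > t_start:
        return bg_latent  # before composition begins, keep the background latent
    return mask * fg_latent + (1.0 - mask) * bg_latent

def energy_step(latent, energy_fn, step_size=0.1):
    # Energy-guided Latent Optimization (sketch): one gradient step that
    # lowers a designated energy, e.g. a distance encouraging identity
    # preservation or background-object style consistency.
    latent = latent.detach().requires_grad_(True)
    energy = energy_fn(latent)
    (grad,) = torch.autograd.grad(energy, latent)
    return (latent - step_size * grad).detach()

# One possible integration into a DDIM-style loop (unet and scheduler are
# assumed to follow the Hugging Face diffusers API):
#   for t in scheduler.timesteps:
#       latent = blend_latents(bg_latents[t], fg_latents[t], mask, t, t_start)
#       noise_pred = unet(latent, t, encoder_hidden_states=text_emb).sample
#       latent = scheduler.step(noise_pred, t, latent).prev_sample
#       latent = energy_step(latent, energy_fn)

In this reading, Adaptive Latent Manipulation is a timestep-gated latent paste and Energy-guided Latent Optimization is a single gradient step on a differentiable energy; the paper's designated energy functions target the identity-preservation and style-adaptation conditions described above.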
Qualitative comparison of TALE with prior SOTA and concurrent works in cross-domain image-guided composition.
Photorealism-Sketching cross-domain composition results.
Photorealism-Oil painting cross-domain composition results.
Photorealism same-domain composition results.
Photorealism-Comic cross-domain composition results.
Photorealism-Watercolor painting cross-domain composition results.
Photorealism-Cartoon animation cross-domain composition results.
Quantitative comparison of TALE with prior SOTA works in cross-domain composition, both on the baseline benchmark covering the sketching, oil painting, and cartoon animation domains, and on the extended benchmark containing a mixture of additional domains such as comic and watercolor painting.
@article{pham2024tale,
  title={TALE: Training-free Cross-domain Image Composition via Adaptive Latent Manipulation and Energy-guided Optimization},
  author={Pham, Kien T and Chen, Jingye and Chen, Qifeng},
  journal={arXiv preprint arXiv:2408.03637},
  year={2024}
}
@inproceedings{10.1145/3664647.3681079,
  author = {Pham, Kien T. and Chen, Jingye and Chen, Qifeng},
  title = {TALE: Training-free Cross-domain Image Composition via Adaptive Latent Manipulation and Energy-guided Optimization},
  year = {2024},
  isbn = {9798400706868},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/3664647.3681079},
  doi = {10.1145/3664647.3681079},
  abstract = {We present TALE, a novel training-free framework harnessing the generative capabilities of text-to-image diffusion models to address the cross-domain image composition task that focuses on flawlessly incorporating user-specified objects into a designated visual context regardless of domain disparity. Previous methods often involve either training auxiliary networks or finetuning diffusion models on customized datasets, which are expensive and may undermine the robust textual and visual priors of pre-trained diffusion models. Some recent works attempt to break the barrier by proposing training-free workarounds that rely on manipulating attention maps to tame the denoising process implicitly. However, composing via attention maps does not necessarily yield desired compositional outcomes. These approaches could only retain some semantic information and usually fall short in preserving identity characteristics of input objects or exhibit limited background-object style adaptation in generated images. In contrast, TALE is a novel method that operates directly on latent space to provide explicit and effective guidance for the composition process to resolve these problems. Specifically, we equip TALE with two mechanisms dubbed Adaptive Latent Manipulation and Energy-guided Latent Optimization. The former formulates noisy latents conducive to initiating and steering the composition process by directly leveraging background and foreground latents at corresponding timesteps, and the latter exploits designated energy functions to further optimize intermediate latents conforming to specific conditions that complement the former to generate desired final results. Our experiments demonstrate that TALE surpasses prior baselines and attains state-of-the-art performance in image-guided composition across various photorealistic and artistic domains.},
  booktitle = {Proceedings of the 32nd ACM International Conference on Multimedia},
  pages = {3160--3169},
  numpages = {10},
  keywords = {adaptive latent manipulation, cross-domain, diffusion models, energy-guided optimization, image composition, training-free},
  location = {Melbourne VIC, Australia},
  series = {MM '24}
}