Satisfying gif – Stable Diffusion text-to-image steps

Sample code to generate the gif (it decodes the latents after every denoising step and stitches the frames into an animated gif):

import torch
from diffusers import StableDiffusionPipeline
from PIL import Image

model_id = "OFA-Sys/small-stable-diffusion-v0"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32)
pipe = pipe.to("cpu")
# on a CUDA GPU, use torch_dtype=torch.float16 and move the pipeline to "cuda" instead

vae = pipe.vae  # used to decode the intermediate latents into images
images = []     # collected frames for the gif

def latents_callback(i, t, latents):
    # undo the VAE scaling factor (0.18215 for Stable Diffusion) before decoding
    latents = 1 / 0.18215 * latents
    # decode the intermediate latents to an image in [-1, 1], then map it to [0, 1]
    image = vae.decode(latents).sample[0]
    image = (image / 2 + 0.5).clamp(0, 1)
    # convert the CHW tensor to an HWC numpy array and store the frame as a PIL image
    image = image.cpu().permute(1, 2, 0).numpy()
    images.extend(pipe.numpy_to_pil(image))

prompt = "mountain landscape with trees and exposed rock"
# run the pipeline; the callback fires after every denoising step and records a frame
final_image = pipe(prompt, callback=latents_callback, callback_steps=1, num_inference_steps=200).images[0]

if len(images) > 0:
    # 200 frames at 200 ms each gives roughly a 40 second gif that loops forever
    images[0].save("diffusion_output.gif", save_all=True, append_images=images[1:], duration=200, loop=0)
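
Note that newer diffusers releases deprecate the callback / callback_steps arguments in favor of callback_on_step_end. A minimal sketch of the same frame capture with that API (an assumption on my part: it needs a diffusers version that ships callback_on_step_end, roughly 0.22 and later; it reuses pipe and prompt from above):

# sketch assuming diffusers >= 0.22 with callback_on_step_end
frames = []

def on_step_end(pipe, step, timestep, callback_kwargs):
    # the current latents are handed to the callback via callback_kwargs
    latents = callback_kwargs["latents"]
    image = pipe.vae.decode(latents / pipe.vae.config.scaling_factor).sample[0]
    image = (image / 2 + 0.5).clamp(0, 1).cpu().permute(1, 2, 0).float().numpy()
    frames.extend(pipe.numpy_to_pil(image))
    # the callback must hand the tensors back to the pipeline
    return callback_kwargs

result = pipe(prompt, num_inference_steps=200, callback_on_step_end=on_step_end)
if len(frames) > 0:
    frames[0].save("diffusion_output_new_api.gif", save_all=True, append_images=frames[1:], duration=200, loop=0)

result.images[0] is still the finished image, the same thing final_image holds in the listing above.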