import gradio as gr
from diffusers import StableDiffusionPipeline, StableVideoDiffusionPipeline
from diffusers.utils import export_to_video
import torch

# Load Text → Image model
text2img = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Load Image → Video model (Stable Video Diffusion)
img2vid = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt", torch_dtype=torch.float16, variant="fp16"
).to("cuda")

def generate_image(prompt):
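    # Run the text-to-image pipeline and return the first generated image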
    image = text2img(prompt).images[0]
    return image

def generate_video(image):
    # SVD expects a 1024x576 PIL image; export the frames to an mp4 so gr.Video can play it
    image = image.resize((1024, 576))
    frames = img2vid(image, decode_chunk_size=8).frames[0]
    return export_to_video(frames, "generated.mp4", fps=7)

with gr.Blocks() as demo:
    gr.Markdown("## 🎨 Text ➀ Image ➀ Video Generator")

    with gr.Row():
        prompt = gr.Textbox(label="πŸ“ Enter Prompt")
        img_output = gr.Image(label="πŸ–ΌοΈ Generated Image")
        btn_img = gr.Button("Generate Image")

    with gr.Row():
        img_input = gr.Image(label="📥 Drop Image Here", type="pil")
        vid_output = gr.Video(label="🎬 Generated Video")
        btn_vid = gr.Button("Generate Video")

    btn_img.click(fn=generate_image, inputs=prompt, outputs=img_output)
    btn_vid.click(fn=generate_video, inputs=img_input, outputs=vid_output)

demo.launch()