# one / app.py
# Sabbirr12's picture
# Update app.py
# 825e7a5 verified
import gradio as gr
import torch
from diffusers import DiffusionPipeline, StableDiffusionPipeline
from diffusers.utils import export_to_video
from PIL import Image
# Pick device and dtype once: fall back to CPU/float32 when no GPU is present,
# so the app still starts on CPU-only machines instead of crashing on .to("cuda").
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

# Load Text -> Image model (Stable Diffusion v1.5).
text2img = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=dtype
).to(device)

# Load Image -> Video model.
# NOTE(review): "damo-vilab/image-to-video" does not look like a valid Hub repo
# id — confirm the intended checkpoint (damo-vilab publishes e.g.
# "damo-vilab/text-to-video-ms-1.7b"); from_pretrained will raise if it is wrong.
img2vid = DiffusionPipeline.from_pretrained(
    "damo-vilab/image-to-video", torch_dtype=dtype, variant="fp16"
).to(device)
def generate_image(prompt):
    """Run the text-to-image pipeline on *prompt* and return the first image."""
    result = text2img(prompt)
    return result.images[0]
def generate_video(image):
    """Generate a short video from *image* and return a playable file path.

    Args:
        image: Input from the Gradio Image component (numpy array by default,
            or a PIL image).

    Returns:
        str: Path to an .mp4 file. The original code returned the raw
        ``.frames`` object, which ``gr.Video`` cannot display — the component
        expects a video file path, so the frames are exported first.
    """
    # Gradio hands over a numpy array by default; diffusers pipelines
    # expect a PIL image.
    if not isinstance(image, Image.Image):
        image = Image.fromarray(image)
    frames = img2vid(image).frames
    # export_to_video writes the frames to an .mp4 and returns its path,
    # which is what the gr.Video output component needs.
    return export_to_video(frames)
# Two-stage Gradio UI: stage 1 turns a prompt into an image,
# stage 2 turns a dropped image into a video.
with gr.Blocks() as demo:
    gr.Markdown("## 🎨 Text ➀ Image ➀ Video Generator")

    # Stage 1: prompt in, generated image out.
    with gr.Row():
        prompt = gr.Textbox(label="πŸ“ Enter Prompt")
        img_output = gr.Image(label="πŸ–ΌοΈ Generated Image")
    btn_img = gr.Button("Generate Image")

    # Stage 2: image in, generated video out.
    with gr.Row():
        img_input = gr.Image(label="πŸ“₯ Drop Image Here")
        vid_output = gr.Video(label="🎬 Generated Video")
    btn_vid = gr.Button("Generate Video")

    # Wire each button to its generation function.
    btn_img.click(fn=generate_image, inputs=prompt, outputs=img_output)
    btn_vid.click(fn=generate_video, inputs=img_input, outputs=vid_output)

demo.launch()