import traceback
from typing import Optional

import gradio as gr
import torch
from diffusers import StableDiffusionPipeline
from PIL import Image

model_id: str = "runwayml/stable-diffusion-v1-5"
device: str = "cpu"  # force CPU usage for compatibility

# Load the pipeline once at startup. If loading fails (e.g. missing weights or
# network issues), keep the pipeline as None so the UI can still launch.
image_generator_pipe: Optional[StableDiffusionPipeline] = None
try:
    pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32)
    image_generator_pipe = pipe.to(device)
except Exception:
    print("ERROR: Failed to load the Stable Diffusion pipeline.")
    traceback.print_exc()


def generate_image_sd(prompt: str, negative_prompt: str, guidance_scale: float, num_inference_steps: int) -> Image.Image:
    """Run the Stable Diffusion pipeline and return the first generated image."""
    if image_generator_pipe is None:
        raise gr.Error("The image generation pipeline is not available. Check the server logs for loading errors.")
    with torch.no_grad():
        output = image_generator_pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=int(num_inference_steps),  # Gradio sliders return floats; diffusers expects an int
        )
    image = output.images[0] if output.images else None
    if image is None:
        raise RuntimeError("No image was returned from the generation pipeline.")
    return image


# Two-column layout: generation controls on the left, output image on the right.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    with gr.Row():
        with gr.Column(scale=1):
            prompt = gr.Textbox(label="Prompt", placeholder="A beautiful futuristic city skyline at night")
            neg_prompt = gr.Textbox(label="Negative Prompt", placeholder="blurry, distorted, watermark")
            guidance = gr.Slider(1.0, 15.0, value=7.5, step=0.5, label="Guidance Scale")
            steps = gr.Slider(10, 50, value=25, step=1, label="Inference Steps")
            generate_btn = gr.Button("Generate Image")
        with gr.Column(scale=1):
            output_image = gr.Image(label="Generated Image", type="pil")

    generate_btn.click(
        fn=generate_image_sd,
        inputs=[prompt, neg_prompt, guidance, steps],
        outputs=output_image,
    )

if __name__ == "__main__":
    if image_generator_pipe is None:
        print("WARNING: Image generator pipeline is not available. UI will launch, but generation will fail.")
    demo.launch(server_name="0.0.0.0", server_port=7860)