willsh1997 committed
Commit 1bec58f · 1 Parent(s): 4c86c32

:sparkles: initial commit
.github/workflows/push_to_hub.yml ADDED
@@ -0,0 +1,20 @@
+name: Sync to Hugging Face hub
+on:
+  push:
+    branches: [main]
+
+  # to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+jobs:
+  sync-to-hub:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+          lfs: true
+      - name: Push to hub
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+        run: git push https://willsh1997:$HF_TOKEN@huggingface.co/spaces/willsh1997/neutral-sd-dev main
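The final step pushes `main` to the Space over HTTPS, authenticating with the `HF_TOKEN` secret. As a hedged sketch (not part of this commit), the same sync could be done with the `huggingface_hub` client instead of a raw `git push`:

    # Hypothetical alternative to the git-push step above (not in this commit):
    # upload the checked-out tree to the Space with the huggingface_hub client.
    import os

    from huggingface_hub import HfApi

    api = HfApi(token=os.environ["HF_TOKEN"])  # same secret the workflow injects
    api.upload_folder(
        folder_path=".",
        repo_id="willsh1997/neutral-sd-dev",
        repo_type="space",
    )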
README.md ADDED
@@ -0,0 +1,14 @@
+---
+title: Neutral Sd Dev
+emoji: 👁
+colorFrom: red
+colorTo: indigo
+sdk: gradio
+sdk_version: 5.12.0
+app_file: gradio_neutral_input_func.py
+pinned: false
+license: apache-2.0
+short_description: neutral sd gradio dev space
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
gradio_neutral_input_func.py ADDED
@@ -0,0 +1,111 @@
+import gradio as gr
+import random
+from PIL import Image
+import io
+import json
+import uuid
+import os
+from stable_diffusion_demo import StableDiffusion
+
+# Setup directories
+BASE_DIR = os.path.abspath(os.path.dirname(__file__))
+IMAGE_DIR = os.path.join(BASE_DIR, "neutral_images_storage")
+os.makedirs(IMAGE_DIR, exist_ok=True)
+
+def generate_image():
+    """Generate a neutral image using Stable Diffusion"""
+    generated_image = StableDiffusion(
+        uncond_embeddings=[''],
+        text_embeddings=[''],
+        height=512,
+        width=512,
+        num_inference_steps=25,
+        guidance_scale=7.5,
+        seed=None,
+    )
+    return generated_image
+
+def save_image_and_description(image, description):
+    """Save the generated image and its description"""
+    if image is None:
+        return "No image to save!", None, None
+
+    if not description:
+        return "Please provide a description!", None, None
+
+    try:
+        image_id = uuid.uuid4()
+        save_path = os.path.join(IMAGE_DIR, f"{image_id}.png")
+        json_path = os.path.join(IMAGE_DIR, f"{image_id}.json")
+
+        # Save image
+        image.save(save_path)
+
+        # Save description
+        desc_json = {"description": description}
+        with open(json_path, "w") as f:
+            json.dump(desc_json, f)
+
+        # Return success message, clear the image output, and return updated gallery
+        return "Saved successfully!", None, load_previous_examples()
+    except Exception as e:
+        return f"Error saving: {str(e)}", None, None
+
+def load_previous_examples():
+    """Load all previously saved images and descriptions"""
+    examples = []
+    for file in os.listdir(IMAGE_DIR):
+        if file.endswith(".png"):
+            image_id = file.replace(".png", "")
+            image_path = os.path.join(IMAGE_DIR, f"{image_id}.png")
+            json_path = os.path.join(IMAGE_DIR, f"{image_id}.json")
+
+            if os.path.exists(json_path):
+                image = Image.open(image_path)
+                with open(json_path, "r") as f:
+                    desc = json.load(f)["description"]
+                examples.append((image, desc))
+    return examples
+
+# Create the Gradio interface
+with gr.Blocks(title="Neutral Image App") as demo:
+    gr.Markdown("# Neutral Image App")
+
+    with gr.Row():
+        with gr.Column():
+            generate_btn = gr.Button("Generate Image")
+            # Disable image upload by setting interactive=False
+            image_output = gr.Image(type="pil", label="Generated Image", interactive=False)
+            description_input = gr.Textbox(label="Describe the image", lines=3)
+            save_btn = gr.Button("Save Image and Description")
+            status_output = gr.Textbox(label="Status")
+
+    with gr.Accordion("Previous Examples", open=False):
+        gallery = gr.Gallery(
+            label="Previous Images",
+            show_label=True,
+            elem_id="gallery"
+        )  # .style(grid=2, height="auto") not supported in Gradio 4+
+
+    # Set up event handlers
+    generate_btn.click(
+        fn=generate_image,
+        outputs=[image_output]
+    )
+
+    # Updated to include gallery refresh in outputs
+    save_btn.click(
+        fn=save_image_and_description,
+        inputs=[image_output, description_input],
+        outputs=[status_output, image_output, gallery]  # Added gallery to outputs
+    )
+
+    # Load previous examples on startup
+    demo.load(
+        fn=load_previous_examples,
+        outputs=[gallery]
+    )
+
+# Launch the app
+if __name__ == "__main__":
+    demo.launch()
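Because the save/load helpers above are plain functions, the round trip can be exercised without launching the UI. A minimal smoke-test sketch (hypothetical, not part of this commit; note that importing the module also loads the diffusion pipeline):

    # Hypothetical smoke test for the save/load round trip (not in this commit).
    from PIL import Image

    from gradio_neutral_input_func import (
        load_previous_examples,
        save_image_and_description,
    )

    placeholder = Image.new("RGB", (512, 512), "gray")  # stands in for a generated image
    status, cleared, gallery = save_image_and_description(placeholder, "a flat gray square")
    assert status == "Saved successfully!"
    assert cleared is None  # second output clears the image component after saving
    assert any(desc == "a flat gray square" for _, desc in load_previous_examples())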
requirements.txt ADDED
@@ -0,0 +1,8 @@
+pillow
+tqdm
+torch
+transformers
+diffusers
+torchvision
+spaces
+gradio
stable_diffusion_demo.py ADDED
@@ -0,0 +1,42 @@
+from PIL import Image
+from tqdm.auto import tqdm
+import torch
+from transformers import CLIPTextModel, CLIPTokenizer
+from diffusers import AutoencoderKL, UNet2DConditionModel, PNDMScheduler, LMSDiscreteScheduler, StableDiffusionPipeline, UniPCMultistepScheduler
+from torchvision import transforms
+import spaces
+
+torch_device = "cuda" if torch.cuda.is_available() else ("mps" if torch.backends.mps.is_available() else "cpu")
+
+torch_dtype = torch.float16 if torch_device in ["cuda", "mps"] else torch.float32
+
+pipe = StableDiffusionPipeline.from_pretrained(
+    "stable-diffusion-v1-5/stable-diffusion-v1-5",
+    torch_dtype=torch_dtype,
+    use_safetensors=True,
+    safety_checker=None).to(torch_device)
+
+pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+
+# pipe.enable_model_cpu_offload() <--- disabled for ZeroGPU
+
+@spaces.GPU
+def StableDiffusion(uncond_embeddings, text_embeddings, height, width, num_inference_steps, guidance_scale, seed):
+    # Despite their names, these arguments are prompt strings, not embeddings
+
+    generator = None
+
+    if seed:
+        generator = torch.manual_seed(seed)
+
+    output = pipe(
+        prompt=text_embeddings,
+        negative_prompt=uncond_embeddings,
+        height=height,
+        width=width,
+        num_inference_steps=num_inference_steps,
+        guidance_scale=guidance_scale,
+        generator=generator
+    ).images[0]
+    return output
+
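For reference, passing a seed re-creates the generator deterministically on each call, so two seeded calls on the same device should yield the same image. A usage sketch mirroring the arguments the Gradio app passes (hypothetical, not part of this commit):

    # Hypothetical usage (not in this commit): same seed on the same device
    # should reproduce the same "neutral" image.
    from stable_diffusion_demo import StableDiffusion

    img_a = StableDiffusion([''], [''], 512, 512, 25, 7.5, seed=42)
    img_b = StableDiffusion([''], [''], 512, 512, 25, 7.5, seed=42)
    assert list(img_a.getdata()) == list(img_b.getdata())
    img_a.save("neutral_42.png")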