rzzgate kadirnar committed on
Commit 5219ac1 (0 parents)

Duplicate from ArtGAN/Stable-Diffusion-ControlNet-WebUI

Co-authored-by: Kadir Nar <[email protected]>

Files changed (33)
  1. .gitattributes +34 -0
  2. README.md +16 -0
  3. app.py +74 -0
  4. diffusion_webui/__init__.py +1 -0
  5. diffusion_webui/diffusion_models/__init__.py +0 -0
  6. diffusion_webui/diffusion_models/controlnet/__init__.py +0 -0
  7. diffusion_webui/diffusion_models/controlnet/controlnet_canny.py +183 -0
  8. diffusion_webui/diffusion_models/controlnet/controlnet_depth.py +187 -0
  9. diffusion_webui/diffusion_models/controlnet/controlnet_hed.py +181 -0
  10. diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/__init__.py +0 -0
  11. diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_canny.py +231 -0
  12. diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_depth.py +228 -0
  13. diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_hed.py +223 -0
  14. diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_mlsd.py +224 -0
  15. diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_pose.py +225 -0
  16. diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_scribble.py +231 -0
  17. diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_seg.py +403 -0
  18. diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/pipeline_stable_diffusion_controlnet_inpaint.py +610 -0
  19. diffusion_webui/diffusion_models/controlnet/controlnet_mlsd.py +173 -0
  20. diffusion_webui/diffusion_models/controlnet/controlnet_pose.py +189 -0
  21. diffusion_webui/diffusion_models/controlnet/controlnet_scribble.py +188 -0
  22. diffusion_webui/diffusion_models/controlnet/controlnet_seg.py +353 -0
  23. diffusion_webui/diffusion_models/stable_diffusion/__init__.py +0 -0
  24. diffusion_webui/diffusion_models/stable_diffusion/img2img_app.py +153 -0
  25. diffusion_webui/diffusion_models/stable_diffusion/inpaint_app.py +148 -0
  26. diffusion_webui/diffusion_models/stable_diffusion/text2img_app.py +167 -0
  27. diffusion_webui/helpers.py +54 -0
  28. diffusion_webui/upscaler_models/codeformer_upscaler.py +81 -0
  29. diffusion_webui/utils/__init__.py +0 -0
  30. diffusion_webui/utils/data_utils.py +12 -0
  31. diffusion_webui/utils/model_list.py +48 -0
  32. diffusion_webui/utils/scheduler_list.py +47 -0
  33. requirements.txt +9 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,16 @@
+ ---
+ title: Stable Diffusion ControlNet WebUI
+ emoji: 🚀
+ colorFrom: green
+ colorTo: red
+ sdk: gradio
+ sdk_version: 3.19
+ app_file: app.py
+ pinned: true
+ license: openrail
+ tags:
+ - making-demos
+ duplicated_from: ArtGAN/Stable-Diffusion-ControlNet-WebUI
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,74 @@
+ import gradio as gr
+
+ from diffusion_webui.helpers import (
+     CodeformerUpscalerGenerator,
+     StableDiffusionControlInpaintNetDepthGenerator,
+     StableDiffusionControlNetCannyGenerator,
+     StableDiffusionControlNetDepthGenerator,
+     StableDiffusionControlNetHEDGenerator,
+     StableDiffusionControlNetInpaintCannyGenerator,
+     StableDiffusionControlNetInpaintHedGenerator,
+     StableDiffusionControlNetInpaintMlsdGenerator,
+     StableDiffusionControlNetInpaintPoseGenerator,
+     StableDiffusionControlNetInpaintScribbleGenerator,
+     StableDiffusionControlNetInpaintSegGenerator,
+     StableDiffusionControlNetMLSDGenerator,
+     StableDiffusionControlNetPoseGenerator,
+     StableDiffusionControlNetScribbleGenerator,
+     StableDiffusionControlNetSegGenerator,
+     StableDiffusionImage2ImageGenerator,
+     StableDiffusionInpaintGenerator,
+     StableDiffusionText2ImageGenerator,
+ )
+
+
+ def main():
+     app = gr.Blocks()
+     with app:
+         with gr.Row():
+             with gr.Column():
+                 with gr.Tab("Text2Img"):
+                     StableDiffusionText2ImageGenerator.app()
+                 with gr.Tab("Img2Img"):
+                     StableDiffusionImage2ImageGenerator.app()
+                 with gr.Tab("Inpaint"):
+                     StableDiffusionInpaintGenerator.app()
+                 with gr.Tab("ControlNet"):
+                     with gr.Tab("Canny"):
+                         StableDiffusionControlNetCannyGenerator.app()
+                     with gr.Tab("Depth"):
+                         StableDiffusionControlNetDepthGenerator.app()
+                     with gr.Tab("HED"):
+                         StableDiffusionControlNetHEDGenerator.app()
+                     with gr.Tab("MLSD"):
+                         StableDiffusionControlNetMLSDGenerator.app()
+                     with gr.Tab("Pose"):
+                         StableDiffusionControlNetPoseGenerator.app()
+                     with gr.Tab("Scribble"):
+                         StableDiffusionControlNetScribbleGenerator.app()
+                     with gr.Tab("Seg"):
+                         StableDiffusionControlNetSegGenerator.app()
+                 with gr.Tab("ControlNet Inpaint"):
+                     with gr.Tab("Canny"):
+                         StableDiffusionControlNetInpaintCannyGenerator.app()
+                     with gr.Tab("Depth"):
+                         StableDiffusionControlInpaintNetDepthGenerator.app()
+                     with gr.Tab("HED"):
+                         StableDiffusionControlNetInpaintHedGenerator.app()
+                     with gr.Tab("MLSD"):
+                         StableDiffusionControlNetInpaintMlsdGenerator.app()
+                     with gr.Tab("Pose"):
+                         StableDiffusionControlNetInpaintPoseGenerator.app()
+                     with gr.Tab("Scribble"):
+                         StableDiffusionControlNetInpaintScribbleGenerator.app()
+                     with gr.Tab("Seg"):
+                         StableDiffusionControlNetInpaintSegGenerator.app()
+                 with gr.Tab("Upscaler"):
+                     CodeformerUpscalerGenerator.app()
+
+     app.queue(concurrency_count=1)
+     app.launch(debug=True, enable_queue=True)
+
+
+ if __name__ == "__main__":
+     main()
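
Note: app.py only wires the tabs together; each generator class contributes its own UI through an app() method and all requests run behind a single-worker queue. The following is a minimal, self-contained sketch of the same Blocks/Tab/queue pattern with a dummy function in place of a diffusion pipeline (gradio 3.x API, as pinned by sdk_version in the README); it is illustrative only and not part of this commit.

```python
import gradio as gr


def echo(prompt):
    # stand-in for a long-running generation call
    return prompt


demo = gr.Blocks()
with demo:
    with gr.Tab("Echo"):
        prompt_box = gr.Textbox(lines=1, placeholder="Prompt", show_label=False)
        output_box = gr.Textbox(label="Output")
        gr.Button("Run").click(fn=echo, inputs=[prompt_box], outputs=[output_box])

demo.queue(concurrency_count=1)  # serialize requests, as app.py does
demo.launch()
```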
diffusion_webui/__init__.py ADDED
@@ -0,0 +1 @@
+ __version__ = "2.0.1"
diffusion_webui/diffusion_models/__init__.py ADDED
File without changes
diffusion_webui/diffusion_models/controlnet/__init__.py ADDED
File without changes
diffusion_webui/diffusion_models/controlnet/controlnet_canny.py ADDED
@@ -0,0 +1,183 @@
1
+ import cv2
2
+ import gradio as gr
3
+ import numpy as np
4
+ import torch
5
+ from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
6
+ from PIL import Image
7
+
8
+ from diffusion_webui.utils.model_list import (
9
+ controlnet_canny_model_list,
10
+ stable_model_list,
11
+ )
12
+ from diffusion_webui.utils.scheduler_list import (
13
+ SCHEDULER_LIST,
14
+ get_scheduler_list,
15
+ )
16
+
17
+
18
+ class StableDiffusionControlNetCannyGenerator:
19
+ def __init__(self):
20
+ self.pipe = None
21
+
22
+ def load_model(self, stable_model_path, controlnet_model_path, scheduler):
23
+ if self.pipe is None:
24
+ controlnet = ControlNetModel.from_pretrained(
25
+ controlnet_model_path, torch_dtype=torch.float16
26
+ )
27
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
28
+ pretrained_model_name_or_path=stable_model_path,
29
+ controlnet=controlnet,
30
+ safety_checker=None,
31
+ torch_dtype=torch.float16,
32
+ )
33
+
34
+ self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
35
+ self.pipe.to("cuda")
36
+ self.pipe.enable_xformers_memory_efficient_attention()
37
+
38
+ return self.pipe
39
+
40
+ def controlnet_canny(
41
+ self,
42
+ image_path: str,
43
+ ):
44
+ image = Image.open(image_path)
45
+ image = np.array(image)
46
+
47
+ image = cv2.Canny(image, 100, 200)
48
+ image = image[:, :, None]
49
+ image = np.concatenate([image, image, image], axis=2)
50
+ image = Image.fromarray(image)
51
+
52
+ return image
53
+
54
+ def generate_image(
55
+ self,
56
+ image_path: str,
57
+ stable_model_path: str,
58
+ controlnet_model_path: str,
59
+ prompt: str,
60
+ negative_prompt: str,
61
+ num_images_per_prompt: int,
62
+ guidance_scale: int,
63
+ num_inference_step: int,
64
+ scheduler: str,
65
+ seed_generator: int,
66
+ ):
67
+ pipe = self.load_model(
68
+ stable_model_path=stable_model_path,
69
+ controlnet_model_path=controlnet_model_path,
70
+ scheduler=scheduler,
71
+ )
72
+
73
+ image = self.controlnet_canny(image_path=image_path)
74
+
75
+ if seed_generator == 0:
76
+ random_seed = torch.randint(0, 1000000, (1,))
77
+ generator = torch.manual_seed(random_seed)
78
+ else:
79
+ generator = torch.manual_seed(seed_generator)
80
+
81
+ output = pipe(
82
+ prompt=prompt,
83
+ image=image,
84
+ negative_prompt=negative_prompt,
85
+ num_images_per_prompt=num_images_per_prompt,
86
+ num_inference_steps=num_inference_step,
87
+ guidance_scale=guidance_scale,
88
+ generator=generator,
89
+ ).images
90
+
91
+ return output
92
+
93
+ def app():
94
+ with gr.Blocks():
95
+ with gr.Row():
96
+ with gr.Column():
97
+ controlnet_canny_image_file = gr.Image(
98
+ type="filepath", label="Image"
99
+ )
100
+
101
+ controlnet_canny_prompt = gr.Textbox(
102
+ lines=1,
103
+ placeholder="Prompt",
104
+ show_label=False,
105
+ )
106
+
107
+ controlnet_canny_negative_prompt = gr.Textbox(
108
+ lines=1,
109
+ placeholder="Negative Prompt",
110
+ show_label=False,
111
+ )
112
+ with gr.Row():
113
+ with gr.Column():
114
+ controlnet_canny_stable_model_id = gr.Dropdown(
115
+ choices=stable_model_list,
116
+ value=stable_model_list[0],
117
+ label="Stable Model Id",
118
+ )
119
+
120
+ controlnet_canny_guidance_scale = gr.Slider(
121
+ minimum=0.1,
122
+ maximum=15,
123
+ step=0.1,
124
+ value=7.5,
125
+ label="Guidance Scale",
126
+ )
127
+ controlnet_canny_num_inference_step = gr.Slider(
128
+ minimum=1,
129
+ maximum=100,
130
+ step=1,
131
+ value=50,
132
+ label="Num Inference Step",
133
+ )
134
+ controlnet_canny_num_images_per_prompt = gr.Slider(
135
+ minimum=1,
136
+ maximum=10,
137
+ step=1,
138
+ value=1,
139
+ label="Number Of Images",
140
+ )
141
+ with gr.Row():
142
+ with gr.Column():
143
+ controlnet_canny_model_id = gr.Dropdown(
144
+ choices=controlnet_canny_model_list,
145
+ value=controlnet_canny_model_list[0],
146
+ label="ControlNet Model Id",
147
+ )
148
+
149
+ controlnet_canny_scheduler = gr.Dropdown(
150
+ choices=SCHEDULER_LIST,
151
+ value=SCHEDULER_LIST[0],
152
+ label="Scheduler",
153
+ )
154
+
155
+ controlnet_canny_seed_generator = gr.Number(
156
+ value=0,
157
+ label="Seed Generator",
158
+ )
159
+ controlnet_canny_predict = gr.Button(value="Generator")
160
+
161
+ with gr.Column():
162
+ output_image = gr.Gallery(
163
+ label="Generated images",
164
+ show_label=False,
165
+ elem_id="gallery",
166
+ ).style(grid=(1, 2))
167
+
168
+ controlnet_canny_predict.click(
169
+ fn=StableDiffusionControlNetCannyGenerator().generate_image,
170
+ inputs=[
171
+ controlnet_canny_image_file,
172
+ controlnet_canny_stable_model_id,
173
+ controlnet_canny_model_id,
174
+ controlnet_canny_prompt,
175
+ controlnet_canny_negative_prompt,
176
+ controlnet_canny_num_images_per_prompt,
177
+ controlnet_canny_guidance_scale,
178
+ controlnet_canny_num_inference_step,
179
+ controlnet_canny_scheduler,
180
+ controlnet_canny_seed_generator,
181
+ ],
182
+ outputs=[output_image],
183
+ )
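
For reference, the class above can also be driven outside the Gradio UI. The sketch below is illustrative only: it assumes a CUDA GPU with enough memory for fp16 Stable Diffusion, an input image named input.png (a placeholder), and that the first entries of the model and scheduler lists in diffusion_webui.utils are valid choices.

```python
# Hedged sketch: use StableDiffusionControlNetCannyGenerator programmatically.
from diffusion_webui.diffusion_models.controlnet.controlnet_canny import (
    StableDiffusionControlNetCannyGenerator,
)
from diffusion_webui.utils.model_list import (
    controlnet_canny_model_list,
    stable_model_list,
)
from diffusion_webui.utils.scheduler_list import SCHEDULER_LIST

generator = StableDiffusionControlNetCannyGenerator()
images = generator.generate_image(
    image_path="input.png",                       # placeholder path
    stable_model_path=stable_model_list[0],
    controlnet_model_path=controlnet_canny_model_list[0],
    prompt="a futuristic city at sunset",
    negative_prompt="low quality, blurry",
    num_images_per_prompt=1,
    guidance_scale=7.5,
    num_inference_step=30,
    scheduler=SCHEDULER_LIST[0],
    seed_generator=42,                            # 0 makes generate_image draw a random seed
)
images[0].save("output.png")
```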
diffusion_webui/diffusion_models/controlnet/controlnet_depth.py ADDED
@@ -0,0 +1,187 @@
1
+ import gradio as gr
2
+ import numpy as np
3
+ import torch
4
+ from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
5
+ from PIL import Image
6
+ from transformers import pipeline
7
+
8
+ from diffusion_webui.utils.model_list import (
9
+ controlnet_depth_model_list,
10
+ stable_model_list,
11
+ )
12
+ from diffusion_webui.utils.scheduler_list import (
13
+ SCHEDULER_LIST,
14
+ get_scheduler_list,
15
+ )
16
+
17
+
18
+ class StableDiffusionControlNetDepthGenerator:
19
+ def __init__(self):
20
+ self.pipe = None
21
+
22
+ def load_model(self, stable_model_path, controlnet_model_path, scheduler):
23
+ if self.pipe is None:
24
+ controlnet = ControlNetModel.from_pretrained(
25
+ controlnet_model_path, torch_dtype=torch.float16
26
+ )
27
+
28
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
29
+ pretrained_model_name_or_path=stable_model_path,
30
+ controlnet=controlnet,
31
+ safety_checker=None,
32
+ torch_dtype=torch.float16,
33
+ )
34
+
35
+ self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
36
+ self.pipe.to("cuda")
37
+ self.pipe.enable_xformers_memory_efficient_attention()
38
+
39
+ return self.pipe
40
+
41
+ def controlnet_depth(self, image_path: str):
42
+ depth_estimator = pipeline("depth-estimation")
43
+ image = Image.open(image_path)
44
+ image = depth_estimator(image)["depth"]
45
+ image = np.array(image)
46
+ image = image[:, :, None]
47
+ image = np.concatenate([image, image, image], axis=2)
48
+ image = Image.fromarray(image)
49
+
50
+ return image
51
+
52
+ def generate_image(
53
+ self,
54
+ image_path: str,
55
+ stable_model_path: str,
56
+ depth_model_path: str,
57
+ prompt: str,
58
+ negative_prompt: str,
59
+ num_images_per_prompt: int,
60
+ guidance_scale: int,
61
+ num_inference_step: int,
62
+ scheduler: str,
63
+ seed_generator: int,
64
+ ):
65
+ image = self.controlnet_depth(image_path)
66
+
67
+ pipe = self.load_model(
68
+ stable_model_path=stable_model_path,
69
+ controlnet_model_path=depth_model_path,
70
+ scheduler=scheduler,
71
+ )
72
+
73
+ if seed_generator == 0:
74
+ random_seed = torch.randint(0, 1000000, (1,))
75
+ generator = torch.manual_seed(random_seed)
76
+ else:
77
+ generator = torch.manual_seed(seed_generator)
78
+
79
+ output = pipe(
80
+ prompt=prompt,
81
+ image=image,
82
+ negative_prompt=negative_prompt,
83
+ num_images_per_prompt=num_images_per_prompt,
84
+ num_inference_steps=num_inference_step,
85
+ guidance_scale=guidance_scale,
86
+ generator=generator,
87
+ ).images
88
+
89
+ return output
90
+
91
+ def app():
92
+ with gr.Blocks():
93
+ with gr.Row():
94
+ with gr.Column():
95
+ controlnet_depth_image_file = gr.Image(
96
+ type="filepath", label="Image"
97
+ )
98
+
99
+ controlnet_depth_prompt = gr.Textbox(
100
+ lines=1,
101
+ show_label=False,
102
+ placeholder="Prompt",
103
+ )
104
+
105
+ controlnet_depth_negative_prompt = gr.Textbox(
106
+ lines=1,
107
+ show_label=False,
108
+ placeholder="Negative Prompt",
109
+ )
110
+
111
+ with gr.Row():
112
+ with gr.Column():
113
+ controlnet_depth_stable_model_id = gr.Dropdown(
114
+ choices=stable_model_list,
115
+ value=stable_model_list[0],
116
+ label="Stable Model Id",
117
+ )
118
+ controlnet_depth_guidance_scale = gr.Slider(
119
+ minimum=0.1,
120
+ maximum=15,
121
+ step=0.1,
122
+ value=7.5,
123
+ label="Guidance Scale",
124
+ )
125
+
126
+ controlnet_depth_num_inference_step = gr.Slider(
127
+ minimum=1,
128
+ maximum=100,
129
+ step=1,
130
+ value=50,
131
+ label="Num Inference Step",
132
+ )
133
+
134
+ controlnet_depth_num_images_per_prompt = gr.Slider(
135
+ minimum=1,
136
+ maximum=10,
137
+ step=1,
138
+ value=1,
139
+ label="Number Of Images",
140
+ )
141
+ with gr.Row():
142
+ with gr.Column():
143
+ controlnet_depth_model_id = gr.Dropdown(
144
+ choices=controlnet_depth_model_list,
145
+ value=controlnet_depth_model_list[0],
146
+ label="ControlNet Model Id",
147
+ )
148
+
149
+ controlnet_depth_scheduler = gr.Dropdown(
150
+ choices=SCHEDULER_LIST,
151
+ value=SCHEDULER_LIST[0],
152
+ label="Scheduler",
153
+ )
154
+
155
+ controlnet_depth_seed_generator = gr.Number(
156
+ minimum=0,
157
+ maximum=1000000,
158
+ step=1,
159
+ value=0,
160
+ label="Seed Generator",
161
+ )
162
+
163
+ controlnet_depth_predict = gr.Button(value="Generator")
164
+
165
+ with gr.Column():
166
+ output_image = gr.Gallery(
167
+ label="Generated images",
168
+ show_label=False,
169
+ elem_id="gallery",
170
+ ).style(grid=(1, 2))
171
+
172
+ controlnet_depth_predict.click(
173
+ fn=StableDiffusionControlNetDepthGenerator().generate_image,
174
+ inputs=[
175
+ controlnet_depth_image_file,
176
+ controlnet_depth_stable_model_id,
177
+ controlnet_depth_model_id,
178
+ controlnet_depth_prompt,
179
+ controlnet_depth_negative_prompt,
180
+ controlnet_depth_num_images_per_prompt,
181
+ controlnet_depth_guidance_scale,
182
+ controlnet_depth_num_inference_step,
183
+ controlnet_depth_scheduler,
184
+ controlnet_depth_seed_generator,
185
+ ],
186
+ outputs=output_image,
187
+ )
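
The only difference from the Canny version is the preprocessing: controlnet_depth runs the default transformers depth-estimation pipeline and stacks the single-channel depth map into a three-channel control image. The same step in isolation (input.png is a placeholder file name):

```python
# Standalone sketch of the depth preprocessing used by controlnet_depth above.
import numpy as np
from PIL import Image
from transformers import pipeline

depth_estimator = pipeline("depth-estimation")              # loads the default checkpoint
depth = depth_estimator(Image.open("input.png"))["depth"]   # single-channel PIL image
depth = np.array(depth)[:, :, None]
control_image = Image.fromarray(np.concatenate([depth, depth, depth], axis=2))
control_image.save("depth_control.png")
```

Note that the depth pipeline is re-created on every request; caching it on the instance (as is done for self.pipe) would avoid reloading the checkpoint each time.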
diffusion_webui/diffusion_models/controlnet/controlnet_hed.py ADDED
@@ -0,0 +1,181 @@
1
+ import gradio as gr
2
+ import torch
3
+ from controlnet_aux import HEDdetector
4
+ from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
5
+ from PIL import Image
6
+
7
+ from diffusion_webui.utils.model_list import (
8
+ controlnet_hed_model_list,
9
+ stable_model_list,
10
+ )
11
+ from diffusion_webui.utils.scheduler_list import (
12
+ SCHEDULER_LIST,
13
+ get_scheduler_list,
14
+ )
15
+
16
+
17
+ class StableDiffusionControlNetHEDGenerator:
18
+ def __init__(self):
19
+ self.pipe = None
20
+
21
+ def load_model(self, stable_model_path, controlnet_model_path, scheduler):
22
+ if self.pipe is None:
23
+ controlnet = ControlNetModel.from_pretrained(
24
+ controlnet_model_path, torch_dtype=torch.float16
25
+ )
26
+
27
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
28
+ pretrained_model_name_or_path=stable_model_path,
29
+ controlnet=controlnet,
30
+ safety_checker=None,
31
+ torch_dtype=torch.float16,
32
+ )
33
+
34
+ self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
35
+ self.pipe.to("cuda")
36
+ self.pipe.enable_xformers_memory_efficient_attention()
37
+
38
+ return self.pipe
39
+
40
+ def controlnet_hed(self, image_path: str):
41
+ hed = HEDdetector.from_pretrained("lllyasviel/ControlNet")
42
+ image = Image.open(image_path)
43
+ image = hed(image)
44
+
45
+ return image
46
+
47
+ def generate_image(
48
+ self,
49
+ image_path: str,
50
+ stable_model_path: str,
51
+ controlnet_hed_model_path: str,
52
+ prompt: str,
53
+ negative_prompt: str,
54
+ num_images_per_prompt: int,
55
+ guidance_scale: int,
56
+ num_inference_step: int,
57
+ scheduler: str,
58
+ seed_generator: int,
59
+ ):
60
+
61
+ image = self.controlnet_hed(image_path=image_path)
62
+
63
+ pipe = self.load_model(
64
+ stable_model_path=stable_model_path,
65
+ controlnet_model_path=controlnet_hed_model_path,
66
+ scheduler=scheduler,
67
+ )
68
+
69
+ if seed_generator == 0:
70
+ random_seed = torch.randint(0, 1000000, (1,))
71
+ generator = torch.manual_seed(random_seed)
72
+ else:
73
+ generator = torch.manual_seed(seed_generator)
74
+
75
+ output = pipe(
76
+ prompt=prompt,
77
+ image=image,
78
+ negative_prompt=negative_prompt,
79
+ num_images_per_prompt=num_images_per_prompt,
80
+ num_inference_steps=num_inference_step,
81
+ guidance_scale=guidance_scale,
82
+ generator=generator,
83
+ ).images
84
+
85
+ return output
86
+
87
+ def app():
88
+ with gr.Blocks():
89
+ with gr.Row():
90
+ with gr.Column():
91
+ controlnet_hed_image_file = gr.Image(
92
+ type="filepath", label="Image"
93
+ )
94
+ controlnet_hed_prompt = gr.Textbox(
95
+ lines=1,
96
+ show_label=False,
97
+ placeholder="Prompt",
98
+ )
99
+
100
+ controlnet_hed_negative_prompt = gr.Textbox(
101
+ lines=1,
102
+ show_label=False,
103
+ placeholder="Negative Prompt",
104
+ )
105
+
106
+ with gr.Row():
107
+ with gr.Column():
108
+ controlnet_hed_stable_model_id = gr.Dropdown(
109
+ choices=stable_model_list,
110
+ value=stable_model_list[0],
111
+ label="Stable Model Id",
112
+ )
113
+ controlnet_hed_guidance_scale = gr.Slider(
114
+ minimum=0.1,
115
+ maximum=15,
116
+ step=0.1,
117
+ value=7.5,
118
+ label="Guidance Scale",
119
+ )
120
+ controlnet_hed_num_inference_step = gr.Slider(
121
+ minimum=1,
122
+ maximum=100,
123
+ step=1,
124
+ value=50,
125
+ label="Num Inference Step",
126
+ )
127
+
128
+ controlnet_hed_num_images_per_prompt = gr.Slider(
129
+ minimum=1,
130
+ maximum=10,
131
+ step=1,
132
+ value=1,
133
+ label="Number Of Images",
134
+ )
135
+
136
+ with gr.Row():
137
+ with gr.Column():
138
+ controlnet_hed_model_id = gr.Dropdown(
139
+ choices=controlnet_hed_model_list,
140
+ value=controlnet_hed_model_list[0],
141
+ label="ControlNet Model Id",
142
+ )
143
+ controlnet_hed_scheduler = gr.Dropdown(
144
+ choices=SCHEDULER_LIST,
145
+ value=SCHEDULER_LIST[0],
146
+ label="Scheduler",
147
+ )
148
+
149
+ controlnet_hed_seed_generator = gr.Number(
150
+ minimum=0,
151
+ maximum=1000000,
152
+ step=1,
153
+ value=0,
154
+ label="Seed Generator",
155
+ )
156
+
157
+ controlnet_hed_predict = gr.Button(value="Generator")
158
+
159
+ with gr.Column():
160
+ output_image = gr.Gallery(
161
+ label="Generated images",
162
+ show_label=False,
163
+ elem_id="gallery",
164
+ ).style(grid=(1, 2))
165
+
166
+ controlnet_hed_predict.click(
167
+ fn=StableDiffusionControlNetHEDGenerator().generate_image,
168
+ inputs=[
169
+ controlnet_hed_image_file,
170
+ controlnet_hed_stable_model_id,
171
+ controlnet_hed_model_id,
172
+ controlnet_hed_prompt,
173
+ controlnet_hed_negative_prompt,
174
+ controlnet_hed_num_images_per_prompt,
175
+ controlnet_hed_guidance_scale,
176
+ controlnet_hed_num_inference_step,
177
+ controlnet_hed_scheduler,
178
+ controlnet_hed_seed_generator,
179
+ ],
180
+ outputs=[output_image],
181
+ )
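
A small design note on the block above: controlnet_hed re-loads the HED annotator on every call, since HEDdetector.from_pretrained is executed inside the preprocessing method. A possible refactor (not part of this commit) that caches the detector on the instance, assuming controlnet_aux keeps the same interface:

```python
# Hypothetical caching wrapper for the HED annotator used above.
from controlnet_aux import HEDdetector
from PIL import Image


class CachedHED:
    def __init__(self):
        self.hed = None

    def __call__(self, image_path: str) -> Image.Image:
        if self.hed is None:  # load the annotator once and reuse it afterwards
            self.hed = HEDdetector.from_pretrained("lllyasviel/ControlNet")
        return self.hed(Image.open(image_path))
```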
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/__init__.py ADDED
File without changes
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_canny.py ADDED
@@ -0,0 +1,231 @@
1
+ import cv2
2
+ import gradio as gr
3
+ import numpy as np
4
+ import torch
5
+ from diffusers import ControlNetModel
6
+ from PIL import Image
7
+
8
+ from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import (
9
+ StableDiffusionControlNetInpaintPipeline,
10
+ )
11
+ from diffusion_webui.utils.model_list import (
12
+ controlnet_canny_model_list,
13
+ stable_inpiant_model_list,
14
+ )
15
+ from diffusion_webui.utils.scheduler_list import (
16
+ SCHEDULER_LIST,
17
+ get_scheduler_list,
18
+ )
19
+
20
+ # https://github.com/mikonvergence/ControlNetInpaint
21
+
22
+
23
+ class StableDiffusionControlNetInpaintCannyGenerator:
24
+ def __init__(self):
25
+ self.pipe = None
26
+
27
+ def load_model(self, stable_model_path, controlnet_model_path, scheduler):
28
+ if self.pipe is None:
29
+ controlnet = ControlNetModel.from_pretrained(
30
+ controlnet_model_path, torch_dtype=torch.float16
31
+ )
32
+ self.pipe = (
33
+ StableDiffusionControlNetInpaintPipeline.from_pretrained(
34
+ pretrained_model_name_or_path=stable_model_path,
35
+ controlnet=controlnet,
36
+ safety_checker=None,
37
+ torch_dtype=torch.float16,
38
+ )
39
+ )
40
+
41
+ self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
42
+ self.pipe.to("cuda")
43
+ self.pipe.enable_xformers_memory_efficient_attention()
44
+
45
+ return self.pipe
46
+
47
+ def load_image(self, image_path):
48
+ image = np.array(image_path)
49
+ image = Image.fromarray(image)
50
+ return image
51
+
52
+ def controlnet_canny_inpaint(
53
+ self,
54
+ image_path: str,
55
+ ):
56
+ image = image_path["image"].convert("RGB").resize((512, 512))
57
+ image = np.array(image)
58
+
59
+ image = cv2.Canny(image, 100, 200)
60
+ image = image[:, :, None]
61
+ image = np.concatenate([image, image, image], axis=2)
62
+ image = Image.fromarray(image)
63
+
64
+ return image
65
+
66
+ def generate_image(
67
+ self,
68
+ image_path: str,
69
+ stable_model_path: str,
70
+ controlnet_model_path: str,
71
+ prompt: str,
72
+ negative_prompt: str,
73
+ num_images_per_prompt: int,
74
+ guidance_scale: int,
75
+ num_inference_step: int,
76
+ controlnet_conditioning_scale: int,
77
+ scheduler: str,
78
+ seed_generator: int,
79
+ ):
80
+
81
+ normal_image = image_path["image"].convert("RGB").resize((512, 512))
82
+ mask_image = image_path["mask"].convert("RGB").resize((512, 512))
83
+
84
+ normal_image = self.load_image(image_path=normal_image)
85
+ mask_image = self.load_image(image_path=mask_image)
86
+
87
+ control_image = self.controlnet_canny_inpaint(image_path=image_path)
88
+ pipe = self.load_model(
89
+ stable_model_path=stable_model_path,
90
+ controlnet_model_path=controlnet_model_path,
91
+ scheduler=scheduler,
92
+ )
93
+
94
+ if seed_generator == 0:
95
+ random_seed = torch.randint(0, 1000000, (1,))
96
+ generator = torch.manual_seed(random_seed)
97
+ else:
98
+ generator = torch.manual_seed(seed_generator)
99
+
100
+ output = pipe(
101
+ prompt=prompt,
102
+ image=normal_image,
103
+ mask_image=mask_image,
104
+ control_image=control_image,
105
+ negative_prompt=negative_prompt,
106
+ num_images_per_prompt=num_images_per_prompt,
107
+ num_inference_steps=num_inference_step,
108
+ guidance_scale=guidance_scale,
109
+ controlnet_conditioning_scale=controlnet_conditioning_scale,
110
+ generator=generator,
111
+ ).images
112
+
113
+ return output
114
+
115
+ def app():
116
+ with gr.Blocks():
117
+ with gr.Row():
118
+ with gr.Column():
119
+ controlnet_canny_inpaint_image_file = gr.Image(
120
+ source="upload",
121
+ tool="sketch",
122
+ elem_id="image_upload",
123
+ type="pil",
124
+ label="Upload",
125
+ )
126
+
127
+ controlnet_canny_inpaint_prompt = gr.Textbox(
128
+ lines=1, placeholder="Prompt", show_label=False
129
+ )
130
+
131
+ controlnet_canny_inpaint_negative_prompt = gr.Textbox(
132
+ lines=1,
133
+ show_label=False,
134
+ placeholder="Negative Prompt",
135
+ )
136
+ with gr.Row():
137
+ with gr.Column():
138
+ controlnet_canny_inpaint_stable_model_id = (
139
+ gr.Dropdown(
140
+ choices=stable_inpiant_model_list,
141
+ value=stable_inpiant_model_list[0],
142
+ label="Stable Model Id",
143
+ )
144
+ )
145
+
146
+ controlnet_canny_inpaint_guidance_scale = gr.Slider(
147
+ minimum=0.1,
148
+ maximum=15,
149
+ step=0.1,
150
+ value=7.5,
151
+ label="Guidance Scale",
152
+ )
153
+
154
+ controlnet_canny_inpaint_num_inference_step = (
155
+ gr.Slider(
156
+ minimum=1,
157
+ maximum=100,
158
+ step=1,
159
+ value=50,
160
+ label="Num Inference Step",
161
+ )
162
+ )
163
+ controlnet_canny_inpaint_num_images_per_prompt = (
164
+ gr.Slider(
165
+ minimum=1,
166
+ maximum=10,
167
+ step=1,
168
+ value=1,
169
+ label="Number Of Images",
170
+ )
171
+ )
172
+ with gr.Row():
173
+ with gr.Column():
174
+ controlnet_canny_inpaint_model_id = gr.Dropdown(
175
+ choices=controlnet_canny_model_list,
176
+ value=controlnet_canny_model_list[0],
177
+ label="Controlnet Model Id",
178
+ )
179
+ controlnet_canny_inpaint_scheduler = (
180
+ gr.Dropdown(
181
+ choices=SCHEDULER_LIST,
182
+ value=SCHEDULER_LIST[0],
183
+ label="Scheduler",
184
+ )
185
+ )
186
+ controlnet_canny_inpaint_controlnet_conditioning_scale = gr.Slider(
187
+ minimum=0.1,
188
+ maximum=1.0,
189
+ step=0.1,
190
+ value=0.5,
191
+ label="Controlnet Conditioning Scale",
192
+ )
193
+
194
+ controlnet_canny_inpaint_seed_generator = (
195
+ gr.Slider(
196
+ minimum=0,
197
+ maximum=1000000,
198
+ step=1,
199
+ value=0,
200
+ label="Seed Generator",
201
+ )
202
+ )
203
+
204
+ controlnet_canny_inpaint_predict = gr.Button(
205
+ value="Generator"
206
+ )
207
+
208
+ with gr.Column():
209
+ output_image = gr.Gallery(
210
+ label="Generated images",
211
+ show_label=False,
212
+ elem_id="gallery",
213
+ ).style(grid=(1, 2))
214
+
215
+ controlnet_canny_inpaint_predict.click(
216
+ fn=StableDiffusionControlNetInpaintCannyGenerator().generate_image,
217
+ inputs=[
218
+ controlnet_canny_inpaint_image_file,
219
+ controlnet_canny_inpaint_stable_model_id,
220
+ controlnet_canny_inpaint_model_id,
221
+ controlnet_canny_inpaint_prompt,
222
+ controlnet_canny_inpaint_negative_prompt,
223
+ controlnet_canny_inpaint_num_images_per_prompt,
224
+ controlnet_canny_inpaint_guidance_scale,
225
+ controlnet_canny_inpaint_num_inference_step,
226
+ controlnet_canny_inpaint_controlnet_conditioning_scale,
227
+ controlnet_canny_inpaint_scheduler,
228
+ controlnet_canny_inpaint_seed_generator,
229
+ ],
230
+ outputs=[output_image],
231
+ )
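
Unlike the plain ControlNet tabs, the inpaint tabs use gr.Image(source="upload", tool="sketch"), so generate_image receives a dict of PIL images rather than a file path: the "image" key holds the picture and the "mask" key holds the painted region (conventionally white where content should be regenerated). Both are resized to 512x512, and the Canny control image is derived from the "image" entry. Below is a hedged sketch of calling it directly, with placeholder file names and the same GPU assumptions as before.

```python
# Hedged sketch: feed the inpaint generator the dict shape the sketch tool produces.
from PIL import Image

from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_canny import (
    StableDiffusionControlNetInpaintCannyGenerator,
)
from diffusion_webui.utils.model_list import (
    controlnet_canny_model_list,
    stable_inpiant_model_list,
)
from diffusion_webui.utils.scheduler_list import SCHEDULER_LIST

image_dict = {
    "image": Image.open("photo.png").convert("RGB"),  # placeholder input image
    "mask": Image.open("mask.png").convert("RGB"),    # placeholder mask image
}

generator = StableDiffusionControlNetInpaintCannyGenerator()
images = generator.generate_image(
    image_path=image_dict,
    stable_model_path=stable_inpiant_model_list[0],
    controlnet_model_path=controlnet_canny_model_list[0],
    prompt="a wooden table",
    negative_prompt="blurry",
    num_images_per_prompt=1,
    guidance_scale=7.5,
    num_inference_step=30,
    controlnet_conditioning_scale=0.5,
    scheduler=SCHEDULER_LIST[0],
    seed_generator=42,
)
images[0].save("inpainted.png")
```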
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_depth.py ADDED
@@ -0,0 +1,228 @@
1
+ import gradio as gr
2
+ import numpy as np
3
+ import torch
4
+ from diffusers import ControlNetModel
5
+ from PIL import Image
6
+ from transformers import pipeline
7
+
8
+ from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import (
9
+ StableDiffusionControlNetInpaintPipeline,
10
+ )
11
+ from diffusion_webui.utils.model_list import (
12
+ controlnet_depth_model_list,
13
+ stable_inpiant_model_list,
14
+ )
15
+ from diffusion_webui.utils.scheduler_list import (
16
+ SCHEDULER_LIST,
17
+ get_scheduler_list,
18
+ )
19
+
20
+ # https://github.com/mikonvergence/ControlNetInpaint
21
+
22
+
23
+ class StableDiffusionControlInpaintNetDepthGenerator:
24
+ def __init__(self):
25
+ self.pipe = None
26
+
27
+ def load_model(self, stable_model_path, controlnet_model_path, scheduler):
28
+ if self.pipe is None:
29
+ controlnet = ControlNetModel.from_pretrained(
30
+ controlnet_model_path, torch_dtype=torch.float16
31
+ )
32
+ self.pipe = (
33
+ StableDiffusionControlNetInpaintPipeline.from_pretrained(
34
+ pretrained_model_name_or_path=stable_model_path,
35
+ controlnet=controlnet,
36
+ safety_checker=None,
37
+ torch_dtype=torch.float16,
38
+ )
39
+ )
40
+
41
+ self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
42
+ self.pipe.to("cuda")
43
+ self.pipe.enable_xformers_memory_efficient_attention()
44
+
45
+ return self.pipe
46
+
47
+ def load_image(self, image_path):
48
+ image = np.array(image_path)
49
+ image = Image.fromarray(image)
50
+ return image
51
+
52
+ def controlnet_inpaint_depth(self, image_path: str):
53
+ depth_estimator = pipeline("depth-estimation")
54
+ image = image_path["image"].convert("RGB").resize((512, 512))
55
+ image = depth_estimator(image)["depth"]
56
+ image = np.array(image)
57
+ image = image[:, :, None]
58
+ image = np.concatenate([image, image, image], axis=2)
59
+ image = Image.fromarray(image)
60
+
61
+ return image
62
+
63
+ def generate_image(
64
+ self,
65
+ image_path: str,
66
+ stable_model_path: str,
67
+ controlnet_model_path: str,
68
+ prompt: str,
69
+ negative_prompt: str,
70
+ num_images_per_prompt: int,
71
+ guidance_scale: int,
72
+ num_inference_step: int,
73
+ controlnet_conditioning_scale: int,
74
+ scheduler: str,
75
+ seed_generator: int,
76
+ ):
77
+ normal_image = image_path["image"].convert("RGB").resize((512, 512))
78
+ mask_image = image_path["mask"].convert("RGB").resize((512, 512))
79
+
80
+ normal_image = self.load_image(image_path=normal_image)
81
+ mask_image = self.load_image(image_path=mask_image)
82
+
83
+ control_image = self.controlnet_inpaint_depth(image_path=image_path)
84
+
85
+ pipe = self.load_model(
86
+ stable_model_path=stable_model_path,
87
+ controlnet_model_path=controlnet_model_path,
88
+ scheduler=scheduler,
89
+ )
90
+
91
+ if seed_generator == 0:
92
+ random_seed = torch.randint(0, 1000000, (1,))
93
+ generator = torch.manual_seed(random_seed)
94
+ else:
95
+ generator = torch.manual_seed(seed_generator)
96
+
97
+ output = pipe(
98
+ prompt=prompt,
99
+ image=normal_image,
100
+ mask_image=mask_image,
101
+ control_image=control_image,
102
+ negative_prompt=negative_prompt,
103
+ num_images_per_prompt=num_images_per_prompt,
104
+ num_inference_steps=num_inference_step,
105
+ guidance_scale=guidance_scale,
106
+ controlnet_conditioning_scale=controlnet_conditioning_scale,
107
+ generator=generator,
108
+ ).images
109
+
110
+ return output
111
+
112
+ def app():
113
+ with gr.Blocks():
114
+ with gr.Row():
115
+ with gr.Column():
116
+ controlnet_depth_inpaint_image_file = gr.Image(
117
+ source="upload",
118
+ tool="sketch",
119
+ elem_id="image_upload",
120
+ type="pil",
121
+ label="Upload",
122
+ )
123
+
124
+ controlnet_depth_inpaint_prompt = gr.Textbox(
125
+ lines=1, placeholder="Prompt", show_label=False
126
+ )
127
+
128
+ controlnet_depth_inpaint_negative_prompt = gr.Textbox(
129
+ lines=1,
130
+ show_label=False,
131
+ placeholder="Negative Prompt",
132
+ )
133
+ with gr.Row():
134
+ with gr.Column():
135
+ controlnet_depth_inpaint_stable_model_id = (
136
+ gr.Dropdown(
137
+ choices=stable_inpiant_model_list,
138
+ value=stable_inpiant_model_list[0],
139
+ label="Stable Model Id",
140
+ )
141
+ )
142
+
143
+ controlnet_depth_inpaint_guidance_scale = gr.Slider(
144
+ minimum=0.1,
145
+ maximum=15,
146
+ step=0.1,
147
+ value=7.5,
148
+ label="Guidance Scale",
149
+ )
150
+
151
+ controlnet_depth_inpaint_num_inference_step = (
152
+ gr.Slider(
153
+ minimum=1,
154
+ maximum=100,
155
+ step=1,
156
+ value=50,
157
+ label="Num Inference Step",
158
+ )
159
+ )
160
+ controlnet_depth_inpaint_num_images_per_prompt = (
161
+ gr.Slider(
162
+ minimum=1,
163
+ maximum=10,
164
+ step=1,
165
+ value=1,
166
+ label="Number Of Images",
167
+ )
168
+ )
169
+ with gr.Row():
170
+ with gr.Column():
171
+ controlnet_depth_inpaint_model_id = gr.Dropdown(
172
+ choices=controlnet_depth_model_list,
173
+ value=controlnet_depth_model_list[0],
174
+ label="Controlnet Model Id",
175
+ )
176
+ controlnet_depth_inpaint_scheduler = (
177
+ gr.Dropdown(
178
+ choices=SCHEDULER_LIST,
179
+ value=SCHEDULER_LIST[0],
180
+ label="Scheduler",
181
+ )
182
+ )
183
+ controlnet_depth_inpaint_controlnet_conditioning_scale = gr.Slider(
184
+ minimum=0.1,
185
+ maximum=1.0,
186
+ step=0.1,
187
+ value=0.5,
188
+ label="Controlnet Conditioning Scale",
189
+ )
190
+
191
+ controlnet_depth_inpaint_seed_generator = (
192
+ gr.Slider(
193
+ minimum=0,
194
+ maximum=1000000,
195
+ step=1,
196
+ value=0,
197
+ label="Seed Generator",
198
+ )
199
+ )
200
+
201
+ controlnet_depth_inpaint_predict = gr.Button(
202
+ value="Generator"
203
+ )
204
+
205
+ with gr.Column():
206
+ output_image = gr.Gallery(
207
+ label="Generated images",
208
+ show_label=False,
209
+ elem_id="gallery",
210
+ ).style(grid=(1, 2))
211
+
212
+ controlnet_depth_inpaint_predict.click(
213
+ fn=StableDiffusionControlInpaintNetDepthGenerator().generate_image,
214
+ inputs=[
215
+ controlnet_depth_inpaint_image_file,
216
+ controlnet_depth_inpaint_stable_model_id,
217
+ controlnet_depth_inpaint_model_id,
218
+ controlnet_depth_inpaint_prompt,
219
+ controlnet_depth_inpaint_negative_prompt,
220
+ controlnet_depth_inpaint_num_images_per_prompt,
221
+ controlnet_depth_inpaint_guidance_scale,
222
+ controlnet_depth_inpaint_num_inference_step,
223
+ controlnet_depth_inpaint_controlnet_conditioning_scale,
224
+ controlnet_depth_inpaint_scheduler,
225
+ controlnet_depth_inpaint_seed_generator,
226
+ ],
227
+ outputs=[output_image],
228
+ )
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_hed.py ADDED
@@ -0,0 +1,223 @@
1
+ import gradio as gr
2
+ import numpy as np
3
+ import torch
4
+ from controlnet_aux import HEDdetector
5
+ from diffusers import ControlNetModel
6
+ from PIL import Image
7
+
8
+ from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import (
9
+ StableDiffusionControlNetInpaintPipeline,
10
+ )
11
+ from diffusion_webui.utils.model_list import (
12
+ controlnet_hed_model_list,
13
+ stable_inpiant_model_list,
14
+ )
15
+ from diffusion_webui.utils.scheduler_list import (
16
+ SCHEDULER_LIST,
17
+ get_scheduler_list,
18
+ )
19
+
20
+ # https://github.com/mikonvergence/ControlNetInpaint
21
+
22
+
23
+ class StableDiffusionControlNetInpaintHedGenerator:
24
+ def __init__(self):
25
+ self.pipe = None
26
+
27
+ def load_model(self, stable_model_path, controlnet_model_path, scheduler):
28
+ if self.pipe is None:
29
+ controlnet = ControlNetModel.from_pretrained(
30
+ controlnet_model_path, torch_dtype=torch.float16
31
+ )
32
+ self.pipe = (
33
+ StableDiffusionControlNetInpaintPipeline.from_pretrained(
34
+ pretrained_model_name_or_path=stable_model_path,
35
+ controlnet=controlnet,
36
+ safety_checker=None,
37
+ torch_dtype=torch.float16,
38
+ )
39
+ )
40
+
41
+ self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
42
+ self.pipe.to("cuda")
43
+ self.pipe.enable_xformers_memory_efficient_attention()
44
+
45
+ return self.pipe
46
+
47
+ def load_image(self, image_path):
48
+ image = np.array(image_path)
49
+ image = Image.fromarray(image)
50
+ return image
51
+
52
+ def controlnet_inpaint_hed(self, image_path: str):
53
+ hed = HEDdetector.from_pretrained("lllyasviel/ControlNet")
54
+ image = image_path["image"].convert("RGB").resize((512, 512))
55
+ image = np.array(image)
56
+ image = hed(image)
57
+
58
+ return image
59
+
60
+ def generate_image(
61
+ self,
62
+ image_path: str,
63
+ stable_model_path: str,
64
+ controlnet_model_path: str,
65
+ prompt: str,
66
+ negative_prompt: str,
67
+ num_images_per_prompt: int,
68
+ guidance_scale: int,
69
+ num_inference_step: int,
70
+ controlnet_conditioning_scale: int,
71
+ scheduler: str,
72
+ seed_generator: int,
73
+ ):
74
+ normal_image = image_path["image"].convert("RGB").resize((512, 512))
75
+ mask_image = image_path["mask"].convert("RGB").resize((512, 512))
76
+
77
+ normal_image = self.load_image(image_path=normal_image)
78
+ mask_image = self.load_image(image_path=mask_image)
79
+
80
+ control_image = self.controlnet_inpaint_hed(image_path=image_path)
81
+
82
+ pipe = self.load_model(
83
+ stable_model_path=stable_model_path,
84
+ controlnet_model_path=controlnet_model_path,
85
+ scheduler=scheduler,
86
+ )
87
+
88
+ if seed_generator == 0:
89
+ random_seed = torch.randint(0, 1000000, (1,))
90
+ generator = torch.manual_seed(random_seed)
91
+ else:
92
+ generator = torch.manual_seed(seed_generator)
93
+
94
+ output = pipe(
95
+ prompt=prompt,
96
+ image=normal_image,
97
+ mask_image=mask_image,
98
+ control_image=control_image,
99
+ negative_prompt=negative_prompt,
100
+ num_images_per_prompt=num_images_per_prompt,
101
+ num_inference_steps=num_inference_step,
102
+ guidance_scale=guidance_scale,
103
+ controlnet_conditioning_scale=controlnet_conditioning_scale,
104
+ generator=generator,
105
+ ).images
106
+
107
+ return output
108
+
109
+ def app():
110
+ with gr.Blocks():
111
+ with gr.Row():
112
+ with gr.Column():
113
+ controlnet_hed_inpaint_image_file = gr.Image(
114
+ source="upload",
115
+ tool="sketch",
116
+ elem_id="image_upload",
117
+ type="pil",
118
+ label="Upload",
119
+ )
120
+
121
+ controlnet_hed_inpaint_prompt = gr.Textbox(
122
+ lines=1, placeholder="Prompt", show_label=False
123
+ )
124
+
125
+ controlnet_hed_inpaint_negative_prompt = gr.Textbox(
126
+ lines=1,
127
+ show_label=False,
128
+ placeholder="Negative Prompt",
129
+ )
130
+ with gr.Row():
131
+ with gr.Column():
132
+ controlnet_hed_inpaint_stable_model_id = (
133
+ gr.Dropdown(
134
+ choices=stable_inpiant_model_list,
135
+ value=stable_inpiant_model_list[0],
136
+ label="Stable Model Id",
137
+ )
138
+ )
139
+
140
+ controlnet_hed_inpaint_guidance_scale = gr.Slider(
141
+ minimum=0.1,
142
+ maximum=15,
143
+ step=0.1,
144
+ value=7.5,
145
+ label="Guidance Scale",
146
+ )
147
+
148
+ controlnet_hed_inpaint_num_inference_step = (
149
+ gr.Slider(
150
+ minimum=1,
151
+ maximum=100,
152
+ step=1,
153
+ value=50,
154
+ label="Num Inference Step",
155
+ )
156
+ )
157
+ controlnet_hed_inpaint_num_images_per_prompt = (
158
+ gr.Slider(
159
+ minimum=1,
160
+ maximum=10,
161
+ step=1,
162
+ value=1,
163
+ label="Number Of Images",
164
+ )
165
+ )
166
+ with gr.Row():
167
+ with gr.Column():
168
+ controlnet_hed_inpaint_model_id = gr.Dropdown(
169
+ choices=controlnet_hed_model_list,
170
+ value=controlnet_hed_model_list[0],
171
+ label="Controlnet Model Id",
172
+ )
173
+ controlnet_hed_inpaint_scheduler = gr.Dropdown(
174
+ choices=SCHEDULER_LIST,
175
+ value=SCHEDULER_LIST[0],
176
+ label="Scheduler",
177
+ )
178
+ controlnet_hed_inpaint_controlnet_conditioning_scale = gr.Slider(
179
+ minimum=0.1,
180
+ maximum=1.0,
181
+ step=0.1,
182
+ value=0.5,
183
+ label="Controlnet Conditioning Scale",
184
+ )
185
+
186
+ controlnet_hed_inpaint_seed_generator = (
187
+ gr.Slider(
188
+ minimum=0,
189
+ maximum=1000000,
190
+ step=1,
191
+ value=0,
192
+ label="Seed Generator",
193
+ )
194
+ )
195
+
196
+ controlnet_hed_inpaint_predict = gr.Button(
197
+ value="Generator"
198
+ )
199
+
200
+ with gr.Column():
201
+ output_image = gr.Gallery(
202
+ label="Generated images",
203
+ show_label=False,
204
+ elem_id="gallery",
205
+ ).style(grid=(1, 2))
206
+
207
+ controlnet_hed_inpaint_predict.click(
208
+ fn=StableDiffusionControlNetInpaintHedGenerator().generate_image,
209
+ inputs=[
210
+ controlnet_hed_inpaint_image_file,
211
+ controlnet_hed_inpaint_stable_model_id,
212
+ controlnet_hed_inpaint_model_id,
213
+ controlnet_hed_inpaint_prompt,
214
+ controlnet_hed_inpaint_negative_prompt,
215
+ controlnet_hed_inpaint_num_images_per_prompt,
216
+ controlnet_hed_inpaint_guidance_scale,
217
+ controlnet_hed_inpaint_num_inference_step,
218
+ controlnet_hed_inpaint_controlnet_conditioning_scale,
219
+ controlnet_hed_inpaint_scheduler,
220
+ controlnet_hed_inpaint_seed_generator,
221
+ ],
222
+ outputs=[output_image],
223
+ )
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_mlsd.py ADDED
@@ -0,0 +1,224 @@
1
+ import gradio as gr
2
+ import numpy as np
3
+ import torch
4
+ from controlnet_aux import MLSDdetector
5
+ from diffusers import ControlNetModel
6
+ from PIL import Image
7
+
8
+ from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import (
9
+ StableDiffusionControlNetInpaintPipeline,
10
+ )
11
+ from diffusion_webui.utils.model_list import (
12
+ controlnet_mlsd_model_list,
13
+ stable_inpiant_model_list,
14
+ )
15
+ from diffusion_webui.utils.scheduler_list import (
16
+ SCHEDULER_LIST,
17
+ get_scheduler_list,
18
+ )
19
+
20
+ # https://github.com/mikonvergence/ControlNetInpaint
21
+
22
+
23
+ class StableDiffusionControlNetInpaintMlsdGenerator:
24
+ def __init__(self):
25
+ self.pipe = None
26
+
27
+ def load_model(self, stable_model_path, controlnet_model_path, scheduler):
28
+ if self.pipe is None:
29
+ controlnet = ControlNetModel.from_pretrained(
30
+ controlnet_model_path, torch_dtype=torch.float16
31
+ )
32
+ self.pipe = (
33
+ StableDiffusionControlNetInpaintPipeline.from_pretrained(
34
+ pretrained_model_name_or_path=stable_model_path,
35
+ controlnet=controlnet,
36
+ safety_checker=None,
37
+ torch_dtype=torch.float16,
38
+ )
39
+ )
40
+
41
+ self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
42
+ self.pipe.to("cuda")
43
+ self.pipe.enable_xformers_memory_efficient_attention()
44
+
45
+ return self.pipe
46
+
47
+ def load_image(self, image_path):
48
+ image = np.array(image_path)
49
+ image = Image.fromarray(image)
50
+ return image
51
+
52
+ def controlnet_inpaint_mlsd(self, image_path: str):
53
+ mlsd = MLSDdetector.from_pretrained("lllyasviel/ControlNet")
54
+ image = image_path["image"].convert("RGB").resize((512, 512))
55
+ image = np.array(image)
56
+ image = mlsd(image)
57
+
58
+ return image
59
+
60
+ def generate_image(
61
+ self,
62
+ image_path: str,
63
+ stable_model_path: str,
64
+ controlnet_model_path: str,
65
+ prompt: str,
66
+ negative_prompt: str,
67
+ num_images_per_prompt: int,
68
+ guidance_scale: int,
69
+ num_inference_step: int,
70
+ controlnet_conditioning_scale: int,
71
+ scheduler: str,
72
+ seed_generator: int,
73
+ ):
74
+
75
+ normal_image = image_path["image"].convert("RGB").resize((512, 512))
76
+ mask_image = image_path["mask"].convert("RGB").resize((512, 512))
77
+
78
+ normal_image = self.load_image(image_path=normal_image)
79
+ mask_image = self.load_image(image_path=mask_image)
80
+
81
+ control_image = self.controlnet_inpaint_mlsd(image_path=image_path)
82
+
83
+ pipe = self.load_model(
84
+ stable_model_path=stable_model_path,
85
+ controlnet_model_path=controlnet_model_path,
86
+ scheduler=scheduler,
87
+ )
88
+
89
+ if seed_generator == 0:
90
+ random_seed = torch.randint(0, 1000000, (1,))
91
+ generator = torch.manual_seed(random_seed)
92
+ else:
93
+ generator = torch.manual_seed(seed_generator)
94
+
95
+ output = pipe(
96
+ prompt=prompt,
97
+ image=normal_image,
98
+ mask_image=mask_image,
99
+ control_image=control_image,
100
+ negative_prompt=negative_prompt,
101
+ num_images_per_prompt=num_images_per_prompt,
102
+ num_inference_steps=num_inference_step,
103
+ guidance_scale=guidance_scale,
104
+ controlnet_conditioning_scale=controlnet_conditioning_scale,
105
+ generator=generator,
106
+ ).images
107
+
108
+ return output
109
+
110
+ def app():
111
+ with gr.Blocks():
112
+ with gr.Row():
113
+ with gr.Column():
114
+ controlnet_mlsd_inpaint_image_file = gr.Image(
115
+ source="upload",
116
+ tool="sketch",
117
+ elem_id="image_upload",
118
+ type="pil",
119
+ label="Upload",
120
+ )
121
+
122
+ controlnet_mlsd_inpaint_prompt = gr.Textbox(
123
+ lines=1, placeholder="Prompt", show_label=False
124
+ )
125
+
126
+ controlnet_mlsd_inpaint_negative_prompt = gr.Textbox(
127
+ lines=1,
128
+ show_label=False,
129
+ placeholder="Negative Prompt",
130
+ )
131
+ with gr.Row():
132
+ with gr.Column():
133
+ controlnet_mlsd_inpaint_stable_model_id = (
134
+ gr.Dropdown(
135
+ choices=stable_inpiant_model_list,
136
+ value=stable_inpiant_model_list[0],
137
+ label="Stable Model Id",
138
+ )
139
+ )
140
+
141
+ controlnet_mlsd_inpaint_guidance_scale = gr.Slider(
142
+ minimum=0.1,
143
+ maximum=15,
144
+ step=0.1,
145
+ value=7.5,
146
+ label="Guidance Scale",
147
+ )
148
+
149
+ controlnet_mlsd_inpaint_num_inference_step = (
150
+ gr.Slider(
151
+ minimum=1,
152
+ maximum=100,
153
+ step=1,
154
+ value=50,
155
+ label="Num Inference Step",
156
+ )
157
+ )
158
+ controlnet_mlsd_inpaint_num_images_per_prompt = (
159
+ gr.Slider(
160
+ minimum=1,
161
+ maximum=10,
162
+ step=1,
163
+ value=1,
164
+ label="Number Of Images",
165
+ )
166
+ )
167
+ with gr.Row():
168
+ with gr.Column():
169
+ controlnet_mlsd_inpaint_model_id = gr.Dropdown(
170
+ choices=controlnet_mlsd_model_list,
171
+ value=controlnet_mlsd_model_list[0],
172
+ label="Controlnet Model Id",
173
+ )
174
+ controlnet_mlsd_inpaint_scheduler = gr.Dropdown(
175
+ choices=SCHEDULER_LIST,
176
+ value=SCHEDULER_LIST[0],
177
+ label="Scheduler",
178
+ )
179
+ controlnet_mlsd_inpaint_controlnet_conditioning_scale = gr.Slider(
180
+ minimum=0.1,
181
+ maximum=1.0,
182
+ step=0.1,
183
+ value=0.5,
184
+ label="Controlnet Conditioning Scale",
185
+ )
186
+
187
+ controlnet_mlsd_inpaint_seed_generator = (
188
+ gr.Slider(
189
+ minimum=0,
190
+ maximum=1000000,
191
+ step=1,
192
+ value=0,
193
+ label="Seed Generator",
194
+ )
195
+ )
196
+
197
+ controlnet_mlsd_inpaint_predict = gr.Button(
198
+ value="Generator"
199
+ )
200
+
201
+ with gr.Column():
202
+ output_image = gr.Gallery(
203
+ label="Generated images",
204
+ show_label=False,
205
+ elem_id="gallery",
206
+ ).style(grid=(1, 2))
207
+
208
+ controlnet_mlsd_inpaint_predict.click(
209
+ fn=StableDiffusionControlNetInpaintMlsdGenerator().generate_image,
210
+ inputs=[
211
+ controlnet_mlsd_inpaint_image_file,
212
+ controlnet_mlsd_inpaint_stable_model_id,
213
+ controlnet_mlsd_inpaint_model_id,
214
+ controlnet_mlsd_inpaint_prompt,
215
+ controlnet_mlsd_inpaint_negative_prompt,
216
+ controlnet_mlsd_inpaint_num_images_per_prompt,
217
+ controlnet_mlsd_inpaint_guidance_scale,
218
+ controlnet_mlsd_inpaint_num_inference_step,
219
+ controlnet_mlsd_inpaint_controlnet_conditioning_scale,
220
+ controlnet_mlsd_inpaint_scheduler,
221
+ controlnet_mlsd_inpaint_seed_generator,
222
+ ],
223
+ outputs=[output_image],
224
+ )
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_pose.py ADDED
@@ -0,0 +1,225 @@
1
+ import gradio as gr
2
+ import numpy as np
3
+ import torch
4
+ from controlnet_aux import OpenposeDetector
5
+ from diffusers import ControlNetModel
6
+ from PIL import Image
7
+
8
+ from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import (
9
+ StableDiffusionControlNetInpaintPipeline,
10
+ )
11
+ from diffusion_webui.utils.model_list import (
12
+ controlnet_pose_model_list,
13
+ stable_inpiant_model_list,
14
+ )
15
+ from diffusion_webui.utils.scheduler_list import (
16
+ SCHEDULER_LIST,
17
+ get_scheduler_list,
18
+ )
19
+
20
+ # https://github.com/mikonvergence/ControlNetInpaint
21
+
22
+
23
+ class StableDiffusionControlNetInpaintPoseGenerator:
24
+ def __init__(self):
25
+ self.pipe = None
26
+
27
+ def load_model(self, stable_model_path, controlnet_model_path, scheduler):
28
+ if self.pipe is None:
29
+ controlnet = ControlNetModel.from_pretrained(
30
+ controlnet_model_path, torch_dtype=torch.float16
31
+ )
32
+
33
+ self.pipe = (
34
+ StableDiffusionControlNetInpaintPipeline.from_pretrained(
35
+ pretrained_model_name_or_path=stable_model_path,
36
+ controlnet=controlnet,
37
+ safety_checker=None,
38
+ torch_dtype=torch.float16,
39
+ )
40
+ )
41
+
42
+ self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
43
+ self.pipe.to("cuda")
44
+ self.pipe.enable_xformers_memory_efficient_attention()
45
+
46
+ return self.pipe
47
+
48
+ def load_image(self, image_path):
49
+ image = np.array(image_path)
50
+ image = Image.fromarray(image)
51
+ return image
52
+
53
+ def controlnet_pose_inpaint(self, image_path: str):
54
+ openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
55
+
56
+ image = image_path["image"].convert("RGB").resize((512, 512))
57
+ image = np.array(image)
58
+ image = openpose(image)
59
+
60
+ return image
61
+
62
+ def generate_image(
63
+ self,
64
+ image_path: str,
65
+ stable_model_path: str,
66
+ controlnet_model_path: str,
67
+ prompt: str,
68
+ negative_prompt: str,
69
+ num_images_per_prompt: int,
70
+ guidance_scale: int,
71
+ num_inference_step: int,
72
+ controlnet_conditioning_scale: int,
73
+ scheduler: str,
74
+ seed_generator: int,
75
+ ):
76
+ normal_image = image_path["image"].convert("RGB").resize((512, 512))
77
+ mask_image = image_path["mask"].convert("RGB").resize((512, 512))
78
+
79
+ normal_image = self.load_image(image_path=normal_image)
80
+ mask_image = self.load_image(image_path=mask_image)
81
+
82
+ controlnet_image = self.controlnet_pose_inpaint(image_path=image_path)
83
+
84
+ pipe = self.load_model(
85
+ stable_model_path=stable_model_path,
86
+ controlnet_model_path=controlnet_model_path,
87
+ scheduler=scheduler,
88
+ )
89
+
90
+ if seed_generator == 0:
91
+ random_seed = torch.randint(0, 1000000, (1,))
92
+ generator = torch.manual_seed(random_seed)
93
+ else:
94
+ generator = torch.manual_seed(seed_generator)
95
+
96
+ output = pipe(
97
+ prompt=prompt,
98
+ image=normal_image,
99
+ mask_image=mask_image,
100
+ control_image=controlnet_image,
101
+ negative_prompt=negative_prompt,
102
+ num_images_per_prompt=num_images_per_prompt,
103
+ num_inference_steps=num_inference_step,
104
+ guidance_scale=guidance_scale,
105
+ controlnet_conditioning_scale=controlnet_conditioning_scale,
106
+ generator=generator,
107
+ ).images
108
+
109
+ return output
110
+
111
+ def app():
112
+ with gr.Blocks():
113
+ with gr.Row():
114
+ with gr.Column():
115
+ controlnet_pose_inpaint_image_file = gr.Image(
116
+ source="upload",
117
+ tool="sketch",
118
+ elem_id="image_upload",
119
+ type="pil",
120
+ label="Upload",
121
+ )
122
+
123
+ controlnet_pose_inpaint_prompt = gr.Textbox(
124
+ lines=1, placeholder="Prompt", show_label=False
125
+ )
126
+
127
+ controlnet_pose_inpaint_negative_prompt = gr.Textbox(
128
+ lines=1,
129
+ show_label=False,
130
+ placeholder="Negative Prompt",
131
+ )
132
+ with gr.Row():
133
+ with gr.Column():
134
+ controlnet_pose_inpaint_stable_model_id = (
135
+ gr.Dropdown(
136
+ choices=stable_inpiant_model_list,
137
+ value=stable_inpiant_model_list[0],
138
+ label="Stable Model Id",
139
+ )
140
+ )
141
+
142
+ controlnet_pose_inpaint_guidance_scale = gr.Slider(
143
+ minimum=0.1,
144
+ maximum=15,
145
+ step=0.1,
146
+ value=7.5,
147
+ label="Guidance Scale",
148
+ )
149
+
150
+ controlnet_pose_inpaint_num_inference_step = (
151
+ gr.Slider(
152
+ minimum=1,
153
+ maximum=100,
154
+ step=1,
155
+ value=50,
156
+ label="Num Inference Step",
157
+ )
158
+ )
159
+ controlnet_pose_inpaint_num_images_per_prompt = (
160
+ gr.Slider(
161
+ minimum=1,
162
+ maximum=10,
163
+ step=1,
164
+ value=1,
165
+ label="Number Of Images",
166
+ )
167
+ )
168
+ with gr.Row():
169
+ with gr.Column():
170
+ controlnet_pose_inpaint_model_id = gr.Dropdown(
171
+ choices=controlnet_pose_model_list,
172
+ value=controlnet_pose_model_list[0],
173
+ label="Controlnet Model Id",
174
+ )
175
+ controlnet_pose_inpaint_scheduler = gr.Dropdown(
176
+ choices=SCHEDULER_LIST,
177
+ value=SCHEDULER_LIST[0],
178
+ label="Scheduler",
179
+ )
180
+ controlnet_pose_inpaint_controlnet_conditioning_scale = gr.Slider(
181
+ minimum=0.1,
182
+ maximum=1.0,
183
+ step=0.1,
184
+ value=0.5,
185
+ label="Controlnet Conditioning Scale",
186
+ )
187
+
188
+ controlnet_pose_inpaint_seed_generator = (
189
+ gr.Slider(
190
+ minimum=0,
191
+ maximum=1000000,
192
+ step=1,
193
+ value=0,
194
+ label="Seed Generator",
195
+ )
196
+ )
197
+
198
+ controlnet_pose_inpaint_predict = gr.Button(
199
+ value="Generator"
200
+ )
201
+
202
+ with gr.Column():
203
+ output_image = gr.Gallery(
204
+ label="Generated images",
205
+ show_label=False,
206
+ elem_id="gallery",
207
+ ).style(grid=(1, 2))
208
+
209
+ controlnet_pose_inpaint_predict.click(
210
+ fn=StableDiffusionControlNetInpaintPoseGenerator().generate_image,
211
+ inputs=[
212
+ controlnet_pose_inpaint_image_file,
213
+ controlnet_pose_inpaint_stable_model_id,
214
+ controlnet_pose_inpaint_model_id,
215
+ controlnet_pose_inpaint_prompt,
216
+ controlnet_pose_inpaint_negative_prompt,
217
+ controlnet_pose_inpaint_num_images_per_prompt,
218
+ controlnet_pose_inpaint_guidance_scale,
219
+ controlnet_pose_inpaint_num_inference_step,
220
+ controlnet_pose_inpaint_controlnet_conditioning_scale,
221
+ controlnet_pose_inpaint_scheduler,
222
+ controlnet_pose_inpaint_seed_generator,
223
+ ],
224
+ outputs=[output_image],
225
+ )
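
A quick aside on usage: everything above is wired to the Gradio `app()` UI, but `generate_image` can be called directly. The sketch below is not part of the commit; the model ids, file names, and scheduler key are placeholder assumptions (use whatever `model_list.py` and `scheduler_list.py` actually expose), and a CUDA device with xformers is required because `load_model` hard-codes both.

```python
# Hypothetical direct call to the pose-guided inpaint generator, bypassing Gradio.
# The Gradio sketch tool normally supplies a dict with "image" and "mask" PIL
# images, so the same payload is built by hand here.
from PIL import Image

from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_pose import (
    StableDiffusionControlNetInpaintPoseGenerator,
)

payload = {
    "image": Image.open("person.png"),  # placeholder input photo
    "mask": Image.open("mask.png"),     # placeholder mask: white = region to repaint
}

images = StableDiffusionControlNetInpaintPoseGenerator().generate_image(
    image_path=payload,
    stable_model_path="runwayml/stable-diffusion-inpainting",   # assumed list entry
    controlnet_model_path="lllyasviel/sd-controlnet-openpose",  # assumed list entry
    prompt="a firefighter standing on the street",
    negative_prompt="low quality, blurry",
    num_images_per_prompt=1,
    guidance_scale=7.5,
    num_inference_step=30,
    controlnet_conditioning_scale=0.5,
    scheduler="DDIM",   # assumed key understood by get_scheduler_list
    seed_generator=42,
)
images[0].save("result.png")
```
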
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_scribble.py ADDED
@@ -0,0 +1,231 @@
1
+ import gradio as gr
2
+ import numpy as np
3
+ import torch
4
+ from controlnet_aux import HEDdetector
5
+ from diffusers import ControlNetModel
6
+ from PIL import Image
7
+
8
+ from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import (
9
+ StableDiffusionControlNetInpaintPipeline,
10
+ )
11
+ from diffusion_webui.utils.model_list import (
12
+ controlnet_scribble_model_list,
13
+ stable_inpiant_model_list,
14
+ )
15
+ from diffusion_webui.utils.scheduler_list import (
16
+ SCHEDULER_LIST,
17
+ get_scheduler_list,
18
+ )
19
+
20
+ # https://github.com/mikonvergence/ControlNetInpaint
21
+
22
+
23
+ class StableDiffusionControlNetInpaintScribbleGenerator:
24
+ def __init__(self):
25
+ self.pipe = None
26
+
27
+ def load_model(self, stable_model_path, controlnet_model_path, scheduler):
28
+ if self.pipe is None:
29
+ controlnet = ControlNetModel.from_pretrained(
30
+ controlnet_model_path, torch_dtype=torch.float16
31
+ )
32
+
33
+ self.pipe = (
34
+ StableDiffusionControlNetInpaintPipeline.from_pretrained(
35
+ pretrained_model_name_or_path=stable_model_path,
36
+ controlnet=controlnet,
37
+ safety_checker=None,
38
+ torch_dtype=torch.float16,
39
+ )
40
+ )
41
+
42
+ self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
43
+ self.pipe.to("cuda")
44
+ self.pipe.enable_xformers_memory_efficient_attention()
45
+
46
+ return self.pipe
47
+
48
+ def load_image(self, image_path):
49
+ image = np.array(image_path)
50
+ image = Image.fromarray(image)
51
+ return image
52
+
53
+ def controlnet_inpaint_scribble(self, image_path: str):
54
+ hed = HEDdetector.from_pretrained("lllyasviel/ControlNet")
55
+
56
+ image = image_path["image"].convert("RGB").resize((512, 512))
57
+ image = np.array(image)
58
+ image = hed(image, scribble=True)
59
+
60
+ return image
61
+
62
+ def generate_image(
63
+ self,
64
+ image_path: str,
65
+ stable_model_path: str,
66
+ controlnet_model_path: str,
67
+ prompt: str,
68
+ negative_prompt: str,
69
+ num_images_per_prompt: int,
70
+ guidance_scale: int,
71
+ num_inference_step: int,
72
+ controlnet_conditioning_scale: int,
73
+ scheduler: str,
74
+ seed_generator: int,
75
+ ):
76
+ normal_image = image_path["image"].convert("RGB").resize((512, 512))
77
+ mask_image = image_path["mask"].convert("RGB").resize((512, 512))
78
+
79
+ normal_image = self.load_image(image_path=normal_image)
80
+ mask_image = self.load_image(image_path=mask_image)
81
+
82
+ controlnet_image = self.controlnet_inpaint_scribble(
83
+ image_path=image_path
84
+ )
85
+
86
+ pipe = self.load_model(
87
+ stable_model_path=stable_model_path,
88
+ controlnet_model_path=controlnet_model_path,
89
+ scheduler=scheduler,
90
+ )
91
+
92
+ if seed_generator == 0:
93
+ random_seed = torch.randint(0, 1000000, (1,))
94
+ generator = torch.manual_seed(random_seed)
95
+ else:
96
+ generator = torch.manual_seed(seed_generator)
97
+
98
+ output = pipe(
99
+ prompt=prompt,
100
+ image=normal_image,
101
+ mask_image=mask_image,
102
+ control_image=controlnet_image,
103
+ negative_prompt=negative_prompt,
104
+ num_images_per_prompt=num_images_per_prompt,
105
+ num_inference_steps=num_inference_step,
106
+ guidance_scale=guidance_scale,
107
+ controlnet_conditioning_scale=controlnet_conditioning_scale,
108
+ generator=generator,
109
+ ).images
110
+
111
+ return output
112
+
113
+ def app():
114
+ with gr.Blocks():
115
+ with gr.Row():
116
+ with gr.Column():
117
+ controlnet_scribble_inpaint_image_file = gr.Image(
118
+ source="upload",
119
+ tool="sketch",
120
+ elem_id="image_upload",
121
+ type="pil",
122
+ label="Upload",
123
+ )
124
+
125
+ controlnet_scribble_inpaint_prompt = gr.Textbox(
126
+ lines=1, placeholder="Prompt", show_label=False
127
+ )
128
+
129
+ controlnet_scribble_inpaint_negative_prompt = gr.Textbox(
130
+ lines=1,
131
+ show_label=False,
132
+ placeholder="Negative Prompt",
133
+ )
134
+ with gr.Row():
135
+ with gr.Column():
136
+ controlnet_scribble_inpaint_stable_model_id = (
137
+ gr.Dropdown(
138
+ choices=stable_inpiant_model_list,
139
+ value=stable_inpiant_model_list[0],
140
+ label="Stable Model Id",
141
+ )
142
+ )
143
+
144
+ controlnet_scribble_inpaint_guidance_scale = (
145
+ gr.Slider(
146
+ minimum=0.1,
147
+ maximum=15,
148
+ step=0.1,
149
+ value=7.5,
150
+ label="Guidance Scale",
151
+ )
152
+ )
153
+
154
+ controlnet_scribble_inpaint_num_inference_step = (
155
+ gr.Slider(
156
+ minimum=1,
157
+ maximum=100,
158
+ step=1,
159
+ value=50,
160
+ label="Num Inference Step",
161
+ )
162
+ )
163
+ controlnet_scribble_inpaint_num_images_per_prompt = gr.Slider(
164
+ minimum=1,
165
+ maximum=10,
166
+ step=1,
167
+ value=1,
168
+ label="Number Of Images",
169
+ )
170
+ with gr.Row():
171
+ with gr.Column():
172
+ controlnet_scribble_inpaint_model_id = (
173
+ gr.Dropdown(
174
+ choices=controlnet_scribble_model_list,
175
+ value=controlnet_scribble_model_list[0],
176
+ label="Controlnet Model Id",
177
+ )
178
+ )
179
+ controlnet_scribble_inpaint_scheduler = (
180
+ gr.Dropdown(
181
+ choices=SCHEDULER_LIST,
182
+ value=SCHEDULER_LIST[0],
183
+ label="Scheduler",
184
+ )
185
+ )
186
+ controlnet_scribble_inpaint_controlnet_conditioning_scale = gr.Slider(
187
+ minimum=0.1,
188
+ maximum=1.0,
189
+ step=0.1,
190
+ value=0.5,
191
+ label="Controlnet Conditioning Scale",
192
+ )
193
+
194
+ controlnet_scribble_inpaint_seed_generator = (
195
+ gr.Slider(
196
+ minimum=0,
197
+ maximum=1000000,
198
+ step=1,
199
+ value=0,
200
+ label="Seed Generator",
201
+ )
202
+ )
203
+
204
+ controlnet_scribble_inpaint_predict = gr.Button(
205
+ value="Generator"
206
+ )
207
+
208
+ with gr.Column():
209
+ output_image = gr.Gallery(
210
+ label="Generated images",
211
+ show_label=False,
212
+ elem_id="gallery",
213
+ ).style(grid=(1, 2))
214
+
215
+ controlnet_scribble_inpaint_predict.click(
216
+ fn=StableDiffusionControlNetInpaintScribbleGenerator().generate_image,
217
+ inputs=[
218
+ controlnet_scribble_inpaint_image_file,
219
+ controlnet_scribble_inpaint_stable_model_id,
220
+ controlnet_scribble_inpaint_model_id,
221
+ controlnet_scribble_inpaint_prompt,
222
+ controlnet_scribble_inpaint_negative_prompt,
223
+ controlnet_scribble_inpaint_num_images_per_prompt,
224
+ controlnet_scribble_inpaint_guidance_scale,
225
+ controlnet_scribble_inpaint_num_inference_step,
226
+ controlnet_scribble_inpaint_controlnet_conditioning_scale,
227
+ controlnet_scribble_inpaint_scheduler,
228
+ controlnet_scribble_inpaint_seed_generator,
229
+ ],
230
+ outputs=[output_image],
231
+ )
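
The scribble variant differs from the pose variant only in its preprocessor: `HEDdetector` with `scribble=True` turns the uploaded photo into a scribble-style edge map that the scribble ControlNet conditions on. A minimal standalone sketch of just that step, with a placeholder file name:

```python
# Standalone sketch of the control-image preprocessing used above.
import numpy as np
from controlnet_aux import HEDdetector
from PIL import Image

hed = HEDdetector.from_pretrained("lllyasviel/ControlNet")
photo = Image.open("room.png").convert("RGB").resize((512, 512))  # placeholder path
control_image = hed(np.array(photo), scribble=True)  # PIL image passed to the pipeline as control_image
control_image.save("scribble_control.png")
```
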
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_seg.py ADDED
@@ -0,0 +1,403 @@
1
+ import gradio as gr
2
+ import numpy as np
3
+ import torch
4
+ from diffusers import ControlNetModel
5
+ from PIL import Image
6
+ from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
7
+
8
+ from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import (
+ StableDiffusionControlNetInpaintPipeline,
+ )
+ from diffusion_webui.utils.model_list import (
9
+ controlnet_seg_model_list,
10
+ stable_inpiant_model_list,
11
+ )
12
+ from diffusion_webui.utils.scheduler_list import (
13
+ SCHEDULER_LIST,
14
+ get_scheduler_list,
15
+ )
16
+
17
+ # https://github.com/mikonvergence/ControlNetInpaint
18
+
19
+
20
+ def ade_palette():
21
+ """ADE20K palette that maps each class to RGB values."""
22
+ return [
23
+ [120, 120, 120],
24
+ [180, 120, 120],
25
+ [6, 230, 230],
26
+ [80, 50, 50],
27
+ [4, 200, 3],
28
+ [120, 120, 80],
29
+ [140, 140, 140],
30
+ [204, 5, 255],
31
+ [230, 230, 230],
32
+ [4, 250, 7],
33
+ [224, 5, 255],
34
+ [235, 255, 7],
35
+ [150, 5, 61],
36
+ [120, 120, 70],
37
+ [8, 255, 51],
38
+ [255, 6, 82],
39
+ [143, 255, 140],
40
+ [204, 255, 4],
41
+ [255, 51, 7],
42
+ [204, 70, 3],
43
+ [0, 102, 200],
44
+ [61, 230, 250],
45
+ [255, 6, 51],
46
+ [11, 102, 255],
47
+ [255, 7, 71],
48
+ [255, 9, 224],
49
+ [9, 7, 230],
50
+ [220, 220, 220],
51
+ [255, 9, 92],
52
+ [112, 9, 255],
53
+ [8, 255, 214],
54
+ [7, 255, 224],
55
+ [255, 184, 6],
56
+ [10, 255, 71],
57
+ [255, 41, 10],
58
+ [7, 255, 255],
59
+ [224, 255, 8],
60
+ [102, 8, 255],
61
+ [255, 61, 6],
62
+ [255, 194, 7],
63
+ [255, 122, 8],
64
+ [0, 255, 20],
65
+ [255, 8, 41],
66
+ [255, 5, 153],
67
+ [6, 51, 255],
68
+ [235, 12, 255],
69
+ [160, 150, 20],
70
+ [0, 163, 255],
71
+ [140, 140, 140],
72
+ [250, 10, 15],
73
+ [20, 255, 0],
74
+ [31, 255, 0],
75
+ [255, 31, 0],
76
+ [255, 224, 0],
77
+ [153, 255, 0],
78
+ [0, 0, 255],
79
+ [255, 71, 0],
80
+ [0, 235, 255],
81
+ [0, 173, 255],
82
+ [31, 0, 255],
83
+ [11, 200, 200],
84
+ [255, 82, 0],
85
+ [0, 255, 245],
86
+ [0, 61, 255],
87
+ [0, 255, 112],
88
+ [0, 255, 133],
89
+ [255, 0, 0],
90
+ [255, 163, 0],
91
+ [255, 102, 0],
92
+ [194, 255, 0],
93
+ [0, 143, 255],
94
+ [51, 255, 0],
95
+ [0, 82, 255],
96
+ [0, 255, 41],
97
+ [0, 255, 173],
98
+ [10, 0, 255],
99
+ [173, 255, 0],
100
+ [0, 255, 153],
101
+ [255, 92, 0],
102
+ [255, 0, 255],
103
+ [255, 0, 245],
104
+ [255, 0, 102],
105
+ [255, 173, 0],
106
+ [255, 0, 20],
107
+ [255, 184, 184],
108
+ [0, 31, 255],
109
+ [0, 255, 61],
110
+ [0, 71, 255],
111
+ [255, 0, 204],
112
+ [0, 255, 194],
113
+ [0, 255, 82],
114
+ [0, 10, 255],
115
+ [0, 112, 255],
116
+ [51, 0, 255],
117
+ [0, 194, 255],
118
+ [0, 122, 255],
119
+ [0, 255, 163],
120
+ [255, 153, 0],
121
+ [0, 255, 10],
122
+ [255, 112, 0],
123
+ [143, 255, 0],
124
+ [82, 0, 255],
125
+ [163, 255, 0],
126
+ [255, 235, 0],
127
+ [8, 184, 170],
128
+ [133, 0, 255],
129
+ [0, 255, 92],
130
+ [184, 0, 255],
131
+ [255, 0, 31],
132
+ [0, 184, 255],
133
+ [0, 214, 255],
134
+ [255, 0, 112],
135
+ [92, 255, 0],
136
+ [0, 224, 255],
137
+ [112, 224, 255],
138
+ [70, 184, 160],
139
+ [163, 0, 255],
140
+ [153, 0, 255],
141
+ [71, 255, 0],
142
+ [255, 0, 163],
143
+ [255, 204, 0],
144
+ [255, 0, 143],
145
+ [0, 255, 235],
146
+ [133, 255, 0],
147
+ [255, 0, 235],
148
+ [245, 0, 255],
149
+ [255, 0, 122],
150
+ [255, 245, 0],
151
+ [10, 190, 212],
152
+ [214, 255, 0],
153
+ [0, 204, 255],
154
+ [20, 0, 255],
155
+ [255, 255, 0],
156
+ [0, 153, 255],
157
+ [0, 41, 255],
158
+ [0, 255, 204],
159
+ [41, 0, 255],
160
+ [41, 255, 0],
161
+ [173, 0, 255],
162
+ [0, 245, 255],
163
+ [71, 0, 255],
164
+ [122, 0, 255],
165
+ [0, 255, 184],
166
+ [0, 92, 255],
167
+ [184, 255, 0],
168
+ [0, 133, 255],
169
+ [255, 214, 0],
170
+ [25, 194, 194],
171
+ [102, 255, 0],
172
+ [92, 0, 255],
173
+ ]
174
+
175
+
176
+ class StableDiffusionControlNetInpaintSegGenerator:
177
+ def __init__(self):
178
+ self.pipe = None
179
+
180
+ def load_model(
181
+ self,
182
+ stable_model_path,
183
+ controlnet_model_path,
184
+ scheduler,
185
+ ):
186
+
187
+ if self.pipe is None:
188
+ controlnet = ControlNetModel.from_pretrained(
189
+ controlnet_model_path, torch_dtype=torch.float16
190
+ )
191
+ self.pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
192
+ pretrained_model_name_or_path=stable_model_path,
193
+ controlnet=controlnet,
194
+ safety_checker=None,
195
+ torch_dtype=torch.float16,
196
+ )
197
+
198
+ self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
199
+ self.pipe.to("cuda")
200
+ self.pipe.enable_xformers_memory_efficient_attention()
201
+
202
+ return self.pipe
203
+
204
+ def load_image(self, image_path):
205
+ image = np.array(image_path)
206
+ image = Image.fromarray(image)
207
+ return image
208
+
209
+ def controlnet_seg_inpaint(self, image_path: str):
210
+ image_processor = AutoImageProcessor.from_pretrained(
211
+ "openmmlab/upernet-convnext-small"
212
+ )
213
+ image_segmentor = UperNetForSemanticSegmentation.from_pretrained(
214
+ "openmmlab/upernet-convnext-small"
215
+ )
216
+
217
+ image = image_path["image"].convert("RGB").resize((512, 512))
218
+ image = np.array(image)
219
+ pixel_values = image_processor(image, return_tensors="pt").pixel_values
220
+
221
+ with torch.no_grad():
222
+ outputs = image_segmentor(pixel_values)
223
+
224
+ seg = image_processor.post_process_semantic_segmentation(
225
+ outputs, target_sizes=[image.shape[:2]]  # (height, width); image is a numpy array here, so image.size would be an int
226
+ )[0]
227
+
228
+ color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
229
+ palette = np.array(ade_palette())
230
+
231
+ for label, color in enumerate(palette):
232
+ color_seg[seg == label, :] = color
233
+
234
+ color_seg = color_seg.astype(np.uint8)
235
+ image = Image.fromarray(color_seg)
236
+
237
+ return image
238
+
239
+ def generate_image(
240
+ self,
241
+ image_path: str,
242
+ stable_model_path: str,
243
+ controlnet_model_path: str,
244
+ prompt: str,
245
+ negative_prompt: str,
246
+ num_images_per_prompt: int,
247
+ guidance_scale: int,
248
+ num_inference_step: int,
249
+ controlnet_conditioning_scale: int,
250
+ scheduler: str,
251
+ seed_generator: int,
252
+ ):
253
+
254
+ normal_image = image_path["image"].convert("RGB").resize((512, 512))
255
+ mask_image = image_path["mask"].convert("RGB").resize((512, 512))
256
+
257
+ normal_image = self.load_image(image_path=normal_image)
258
+ mask_image = self.load_image(image_path=mask_image)
259
+
260
+ controlnet_image = self.controlnet_seg_inpaint(image_path=image_path)
261
+
262
+ pipe = self.load_model(
263
+ stable_model_path=stable_model_path,
264
+ controlnet_model_path=controlnet_model_path,
265
+ scheduler=scheduler,
266
+ )
267
+
268
+ if seed_generator == 0:
269
+ random_seed = torch.randint(0, 1000000, (1,))
270
+ generator = torch.manual_seed(random_seed)
271
+ else:
272
+ generator = torch.manual_seed(seed_generator)
273
+
274
+ output = pipe(
275
+ prompt=prompt,
276
+ image=normal_image,
277
+ mask_image=mask_image,
278
+ control_image=controlnet_image,
279
+ negative_prompt=negative_prompt,
280
+ num_images_per_prompt=num_images_per_prompt,
281
+ num_inference_steps=num_inference_step,
282
+ guidance_scale=guidance_scale,
283
+ controlnet_conditioning_scale=controlnet_conditioning_scale,
284
+ generator=generator,
285
+ ).images
286
+
287
+ return output
288
+
289
+ def app():
290
+ with gr.Blocks():
291
+ with gr.Row():
292
+ with gr.Column():
293
+ controlnet_seg_inpaint_image_file = gr.Image(
294
+ source="upload",
295
+ tool="sketch",
296
+ elem_id="image_upload",
297
+ type="pil",
298
+ label="Upload",
299
+ )
300
+
301
+ controlnet_seg_inpaint_prompt = gr.Textbox(
302
+ lines=1, placeholder="Prompt", show_label=False
303
+ )
304
+
305
+ controlnet_seg_inpaint_negative_prompt = gr.Textbox(
306
+ lines=1,
307
+ show_label=False,
308
+ placeholder="Negative Prompt",
309
+ )
310
+ with gr.Row():
311
+ with gr.Column():
312
+ controlnet_seg_inpaint_stable_model_id = (
313
+ gr.Dropdown(
314
+ choices=stable_inpiant_model_list,
315
+ value=stable_inpiant_model_list[0],
316
+ label="Stable Model Id",
317
+ )
318
+ )
319
+
320
+ controlnet_seg_inpaint_guidance_scale = gr.Slider(
321
+ minimum=0.1,
322
+ maximum=15,
323
+ step=0.1,
324
+ value=7.5,
325
+ label="Guidance Scale",
326
+ )
327
+
328
+ controlnet_seg_inpaint_num_inference_step = (
329
+ gr.Slider(
330
+ minimum=1,
331
+ maximum=100,
332
+ step=1,
333
+ value=50,
334
+ label="Num Inference Step",
335
+ )
336
+ )
337
+ controlnet_seg_inpaint_num_images_per_prompt = (
338
+ gr.Slider(
339
+ minimum=1,
340
+ maximum=10,
341
+ step=1,
342
+ value=1,
343
+ label="Number Of Images",
344
+ )
345
+ )
346
+ with gr.Row():
347
+ with gr.Column():
348
+ controlnet_seg_inpaint_model_id = gr.Dropdown(
349
+ choices=controlnet_seg_model_list,
350
+ value=controlnet_seg_model_list[0],
351
+ label="Controlnet Model Id",
352
+ )
353
+ controlnet_seg_inpaint_scheduler = gr.Dropdown(
354
+ choices=SCHEDULER_LIST,
355
+ value=SCHEDULER_LIST[0],
356
+ label="Scheduler",
357
+ )
358
+ controlnet_seg_inpaint_controlnet_conditioning_scale = gr.Slider(
359
+ minimum=0.1,
360
+ maximum=1.0,
361
+ step=0.1,
362
+ value=0.5,
363
+ label="Controlnet Conditioning Scale",
364
+ )
365
+
366
+ controlnet_seg_inpaint_seed_generator = (
367
+ gr.Slider(
368
+ minimum=0,
369
+ maximum=1000000,
370
+ step=1,
371
+ value=0,
372
+ label="Seed Generator",
373
+ )
374
+ )
375
+
376
+ controlnet_seg_inpaint_predict = gr.Button(
377
+ value="Generator"
378
+ )
379
+
380
+ with gr.Column():
381
+ output_image = gr.Gallery(
382
+ label="Generated images",
383
+ show_label=False,
384
+ elem_id="gallery",
385
+ ).style(grid=(1, 2))
386
+
387
+ controlnet_seg_inpaint_predict.click(
388
+ fn=StableDiffusionControlNetInpaintSegGenerator().generate_image,
389
+ inputs=[
390
+ controlnet_seg_inpaint_image_file,
391
+ controlnet_seg_inpaint_stable_model_id,
392
+ controlnet_seg_inpaint_model_id,
393
+ controlnet_seg_inpaint_prompt,
394
+ controlnet_seg_inpaint_negative_prompt,
395
+ controlnet_seg_inpaint_num_images_per_prompt,
396
+ controlnet_seg_inpaint_guidance_scale,
397
+ controlnet_seg_inpaint_num_inference_step,
398
+ controlnet_seg_inpaint_controlnet_conditioning_scale,
399
+ controlnet_seg_inpaint_scheduler,
400
+ controlnet_seg_inpaint_seed_generator,
401
+ ],
402
+ outputs=[output_image],
403
+ )
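
The segmentation branch colour-codes the UperNet prediction with the ADE20K palette before handing it to ControlNet: every pixel whose predicted class id is `label` is painted with that class's RGB value. A toy illustration of the mapping, using the first three palette entries defined above:

```python
# Toy illustration of the label -> colour mapping performed in controlnet_seg_inpaint.
import numpy as np

def ade_palette_head():
    # First three entries of the ADE20K palette defined above (full list omitted).
    return [[120, 120, 120], [180, 120, 120], [6, 230, 230]]

seg = np.array([[0, 1], [1, 2]])  # toy 2x2 map of predicted class ids
color_seg = np.zeros((*seg.shape, 3), dtype=np.uint8)
for label, color in enumerate(np.array(ade_palette_head(), dtype=np.uint8)):
    color_seg[seg == label] = color

print(color_seg[0, 0])  # [120 120 120] -> class 0
print(color_seg[1, 1])  # [  6 230 230] -> class 2
```
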
diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/pipeline_stable_diffusion_controlnet_inpaint.py ADDED
@@ -0,0 +1,610 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ import numpy as np
17
+ import PIL.Image
18
+ import torch
19
+ from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import *
20
+
21
+ # https://github.com/mikonvergence/ControlNetInpaint
22
+
23
+ EXAMPLE_DOC_STRING = """
24
+ Examples:
25
+ ```py
26
+ >>> # !pip install opencv-python transformers accelerate
27
+ >>> from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel, UniPCMultistepScheduler
28
+ >>> from diffusers.utils import load_image
29
+ >>> import numpy as np
30
+ >>> import torch
31
+
32
+ >>> import cv2
33
+ >>> from PIL import Image
34
+ >>> # download an image
35
+ >>> image = load_image(
36
+ ... "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
37
+ ... )
38
+ >>> image = np.array(image)
39
+ >>> mask_image = load_image(
40
+ ... "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
41
+ ... )
42
+ >>> mask_image = np.array(mask_image)
43
+ >>> # get canny image
44
+ >>> canny_image = cv2.Canny(image, 100, 200)
45
+ >>> canny_image = canny_image[:, :, None]
46
+ >>> canny_image = np.concatenate([canny_image, canny_image, canny_image], axis=2)
47
+ >>> canny_image = Image.fromarray(canny_image)
48
+
49
+ >>> # load control net and stable diffusion v1-5
50
+ >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
51
+ >>> pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
52
+ ... "runwayml/stable-diffusion-inpainting", controlnet=controlnet, torch_dtype=torch.float16
53
+ ... )
54
+
55
+ >>> # speed up diffusion process with faster scheduler and memory optimization
56
+ >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
57
+ >>> # remove following line if xformers is not installed
58
+ >>> pipe.enable_xformers_memory_efficient_attention()
59
+
60
+ >>> pipe.enable_model_cpu_offload()
61
+
62
+ >>> # generate image
63
+ >>> generator = torch.manual_seed(0)
64
+ >>> image = pipe(
65
+ ... "futuristic-looking doggo",
66
+ ... num_inference_steps=20,
67
+ ... generator=generator,
68
+ ... image=image,
69
+ ... control_image=canny_image,
70
+ ... mask_image=mask_image
71
+ ... ).images[0]
72
+ ```
73
+ """
74
+
75
+
76
+ def prepare_mask_and_masked_image(image, mask):
77
+ """
78
+ Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
79
+ converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
80
+ ``image`` and ``1`` for the ``mask``.
81
+ The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
82
+ binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
83
+ Args:
84
+ image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
85
+ It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
86
+ ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
87
+ mask (Union[np.array, PIL.Image, torch.Tensor]): The mask to apply to the image, i.e. regions to inpaint.
88
+ It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
89
+ ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
90
+ Raises:
91
+ ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
92
+ should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
93
+ TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
94
+ (or the other way around).
95
+ Returns:
96
+ tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
97
+ dimensions: ``batch x channels x height x width``.
98
+ """
99
+ if isinstance(image, torch.Tensor):
100
+ if not isinstance(mask, torch.Tensor):
101
+ raise TypeError(
102
+ f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not"
103
+ )
104
+
105
+ # Batch single image
106
+ if image.ndim == 3:
107
+ assert (
108
+ image.shape[0] == 3
109
+ ), "Image outside a batch should be of shape (3, H, W)"
110
+ image = image.unsqueeze(0)
111
+
112
+ # Batch and add channel dim for single mask
113
+ if mask.ndim == 2:
114
+ mask = mask.unsqueeze(0).unsqueeze(0)
115
+
116
+ # Batch single mask or add channel dim
117
+ if mask.ndim == 3:
118
+ # Single batched mask, no channel dim or single mask not batched but channel dim
119
+ if mask.shape[0] == 1:
120
+ mask = mask.unsqueeze(0)
121
+
122
+ # Batched masks no channel dim
123
+ else:
124
+ mask = mask.unsqueeze(1)
125
+
126
+ assert (
127
+ image.ndim == 4 and mask.ndim == 4
128
+ ), "Image and Mask must have 4 dimensions"
129
+ assert (
130
+ image.shape[-2:] == mask.shape[-2:]
131
+ ), "Image and Mask must have the same spatial dimensions"
132
+ assert (
133
+ image.shape[0] == mask.shape[0]
134
+ ), "Image and Mask must have the same batch size"
135
+
136
+ # Check image is in [-1, 1]
137
+ if image.min() < -1 or image.max() > 1:
138
+ raise ValueError("Image should be in [-1, 1] range")
139
+
140
+ # Check mask is in [0, 1]
141
+ if mask.min() < 0 or mask.max() > 1:
142
+ raise ValueError("Mask should be in [0, 1] range")
143
+
144
+ # Binarize mask
145
+ mask[mask < 0.5] = 0
146
+ mask[mask >= 0.5] = 1
147
+
148
+ # Image as float32
149
+ image = image.to(dtype=torch.float32)
150
+ elif isinstance(mask, torch.Tensor):
151
+ raise TypeError(
152
+ f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not"
153
+ )
154
+ else:
155
+ # preprocess image
156
+ if isinstance(image, (PIL.Image.Image, np.ndarray)):
157
+ image = [image]
158
+
159
+ if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
160
+ image = [np.array(i.convert("RGB"))[None, :] for i in image]
161
+ image = np.concatenate(image, axis=0)
162
+ elif isinstance(image, list) and isinstance(image[0], np.ndarray):
163
+ image = np.concatenate([i[None, :] for i in image], axis=0)
164
+
165
+ image = image.transpose(0, 3, 1, 2)
166
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
167
+
168
+ # preprocess mask
169
+ if isinstance(mask, (PIL.Image.Image, np.ndarray)):
170
+ mask = [mask]
171
+
172
+ if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
173
+ mask = np.concatenate(
174
+ [np.array(m.convert("L"))[None, None, :] for m in mask], axis=0
175
+ )
176
+ mask = mask.astype(np.float32) / 255.0
177
+ elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
178
+ mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
179
+
180
+ mask[mask < 0.5] = 0
181
+ mask[mask >= 0.5] = 1
182
+ mask = torch.from_numpy(mask)
183
+
184
+ masked_image = image * (mask < 0.5)
185
+
186
+ return mask, masked_image
187
+
188
+
189
+ class StableDiffusionControlNetInpaintPipeline(
190
+ StableDiffusionControlNetPipeline
191
+ ):
192
+ r"""
193
+ Pipeline for text-guided image inpainting using Stable Diffusion with ControlNet guidance.
194
+
195
+ This model inherits from [`StableDiffusionControlNetPipeline`]. Check the superclass documentation for the generic methods the
196
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
197
+
198
+ Args:
199
+ vae ([`AutoencoderKL`]):
200
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
201
+ text_encoder ([`CLIPTextModel`]):
202
+ Frozen text-encoder. Stable Diffusion uses the text portion of
203
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
204
+ the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
205
+ tokenizer (`CLIPTokenizer`):
206
+ Tokenizer of class
207
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
208
+ unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
209
+ controlnet ([`ControlNetModel`]):
210
+ Provides additional conditioning to the unet during the denoising process
211
+ scheduler ([`SchedulerMixin`]):
212
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
213
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
214
+ safety_checker ([`StableDiffusionSafetyChecker`]):
215
+ Classification module that estimates whether generated images could be considered offensive or harmful.
216
+ Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
217
+ feature_extractor ([`CLIPFeatureExtractor`]):
218
+ Model that extracts features from generated images to be used as inputs for the `safety_checker`.
219
+ """
220
+
221
+ def prepare_mask_latents(
222
+ self,
223
+ mask,
224
+ masked_image,
225
+ batch_size,
226
+ height,
227
+ width,
228
+ dtype,
229
+ device,
230
+ generator,
231
+ do_classifier_free_guidance,
232
+ ):
233
+ # resize the mask to latents shape as we concatenate the mask to the latents
234
+ # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
235
+ # and half precision
236
+ mask = torch.nn.functional.interpolate(
237
+ mask,
238
+ size=(
239
+ height // self.vae_scale_factor,
240
+ width // self.vae_scale_factor,
241
+ ),
242
+ )
243
+ mask = mask.to(device=device, dtype=dtype)
244
+
245
+ masked_image = masked_image.to(device=device, dtype=dtype)
246
+
247
+ # encode the mask image into latents space so we can concatenate it to the latents
248
+ if isinstance(generator, list):
249
+ masked_image_latents = [
250
+ self.vae.encode(masked_image[i : i + 1]).latent_dist.sample(
251
+ generator=generator[i]
252
+ )
253
+ for i in range(batch_size)
254
+ ]
255
+ masked_image_latents = torch.cat(masked_image_latents, dim=0)
256
+ else:
257
+ masked_image_latents = self.vae.encode(
258
+ masked_image
259
+ ).latent_dist.sample(generator=generator)
260
+ masked_image_latents = (
261
+ self.vae.config.scaling_factor * masked_image_latents
262
+ )
263
+
264
+ # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
265
+ if mask.shape[0] < batch_size:
266
+ if not batch_size % mask.shape[0] == 0:
267
+ raise ValueError(
268
+ "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
269
+ f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
270
+ " of masks that you pass is divisible by the total requested batch size."
271
+ )
272
+ mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
273
+ if masked_image_latents.shape[0] < batch_size:
274
+ if not batch_size % masked_image_latents.shape[0] == 0:
275
+ raise ValueError(
276
+ "The passed images and the required batch size don't match. Images are supposed to be duplicated"
277
+ f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
278
+ " Make sure the number of images that you pass is divisible by the total requested batch size."
279
+ )
280
+ masked_image_latents = masked_image_latents.repeat(
281
+ batch_size // masked_image_latents.shape[0], 1, 1, 1
282
+ )
283
+
284
+ mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
285
+ masked_image_latents = (
286
+ torch.cat([masked_image_latents] * 2)
287
+ if do_classifier_free_guidance
288
+ else masked_image_latents
289
+ )
290
+
291
+ # aligning device to prevent device errors when concatenating it with the latent model input
292
+ masked_image_latents = masked_image_latents.to(
293
+ device=device, dtype=dtype
294
+ )
295
+ return mask, masked_image_latents
296
+
297
+ @torch.no_grad()
298
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
299
+ def __call__(
300
+ self,
301
+ prompt: Union[str, List[str]] = None,
302
+ image: Union[torch.FloatTensor, PIL.Image.Image] = None,
303
+ control_image: Union[
304
+ torch.FloatTensor,
305
+ PIL.Image.Image,
306
+ List[torch.FloatTensor],
307
+ List[PIL.Image.Image],
308
+ ] = None,
309
+ mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
310
+ height: Optional[int] = None,
311
+ width: Optional[int] = None,
312
+ num_inference_steps: int = 50,
313
+ guidance_scale: float = 7.5,
314
+ negative_prompt: Optional[Union[str, List[str]]] = None,
315
+ num_images_per_prompt: Optional[int] = 1,
316
+ eta: float = 0.0,
317
+ generator: Optional[
318
+ Union[torch.Generator, List[torch.Generator]]
319
+ ] = None,
320
+ latents: Optional[torch.FloatTensor] = None,
321
+ prompt_embeds: Optional[torch.FloatTensor] = None,
322
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
323
+ output_type: Optional[str] = "pil",
324
+ return_dict: bool = True,
325
+ callback: Optional[
326
+ Callable[[int, int, torch.FloatTensor], None]
327
+ ] = None,
328
+ callback_steps: int = 1,
329
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
330
+ controlnet_conditioning_scale: float = 1.0,
331
+ ):
332
+ r"""
333
+ Function invoked when calling the pipeline for generation.
334
+ Args:
335
+ prompt (`str` or `List[str]`, *optional*):
336
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
337
+ instead.
338
+ image (`PIL.Image.Image`):
339
+ `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
340
+ be masked out with `mask_image` and repainted according to `prompt`.
341
+ control_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]`):
342
+ The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If
343
+ the type is specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
344
+ also be accepted as an image. The control image is automatically resized to fit the output image.
345
+ mask_image (`PIL.Image.Image`):
346
+ `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
347
+ repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
348
+ to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
349
+ instead of 3, so the expected shape would be `(B, H, W, 1)`.
350
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
351
+ The height in pixels of the generated image.
352
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
353
+ The width in pixels of the generated image.
354
+ num_inference_steps (`int`, *optional*, defaults to 50):
355
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
356
+ expense of slower inference.
357
+ guidance_scale (`float`, *optional*, defaults to 7.5):
358
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
359
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
360
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
361
+ 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
362
+ usually at the expense of lower image quality.
363
+ negative_prompt (`str` or `List[str]`, *optional*):
364
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
365
+ `negative_prompt_embeds` instead.
366
+ Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
367
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
368
+ The number of images to generate per prompt.
369
+ eta (`float`, *optional*, defaults to 0.0):
370
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
371
+ [`schedulers.DDIMScheduler`], will be ignored for others.
372
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
373
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
374
+ to make generation deterministic.
375
+ latents (`torch.FloatTensor`, *optional*):
376
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
377
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
378
+ tensor will ge generated by sampling using the supplied random `generator`.
379
+ prompt_embeds (`torch.FloatTensor`, *optional*):
380
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
381
+ provided, text embeddings will be generated from `prompt` input argument.
382
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
383
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
384
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
385
+ argument.
386
+ output_type (`str`, *optional*, defaults to `"pil"`):
387
+ The output format of the generated image. Choose between
388
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
389
+ return_dict (`bool`, *optional*, defaults to `True`):
390
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
391
+ plain tuple.
392
+ callback (`Callable`, *optional*):
393
+ A function that will be called every `callback_steps` steps during inference. The function will be
394
+ called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
395
+ callback_steps (`int`, *optional*, defaults to 1):
396
+ The frequency at which the `callback` function will be called. If not specified, the callback will be
397
+ called at every step.
398
+ cross_attention_kwargs (`dict`, *optional*):
399
+ A kwargs dictionary that if specified is passed along to the `AttnProcessor` as defined under
400
+ `self.processor` in
401
+ [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
402
+ controlnet_conditioning_scale (`float`, *optional*, defaults to 1.0):
403
+ The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
404
+ to the residual in the original unet.
405
+ Examples:
406
+ Returns:
407
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
408
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
409
+ When returning a tuple, the first element is a list with the generated images, and the second element is a
410
+ list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
411
+ (nsfw) content, according to the `safety_checker`.
412
+ """
413
+ # 0. Default height and width to unet
414
+ height, width = self._default_height_width(height, width, control_image)
415
+
416
+ # 1. Check inputs. Raise error if not correct
417
+ self.check_inputs(
418
+ prompt,
419
+ control_image,
420
+ height,
421
+ width,
422
+ callback_steps,
423
+ negative_prompt,
424
+ prompt_embeds,
425
+ negative_prompt_embeds,
426
+ )
427
+
428
+ # 2. Define call parameters
429
+ if prompt is not None and isinstance(prompt, str):
430
+ batch_size = 1
431
+ elif prompt is not None and isinstance(prompt, list):
432
+ batch_size = len(prompt)
433
+ else:
434
+ batch_size = prompt_embeds.shape[0]
435
+
436
+ device = self._execution_device
437
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
438
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
439
+ # corresponds to doing no classifier free guidance.
440
+ do_classifier_free_guidance = guidance_scale > 1.0
441
+
442
+ # 3. Encode input prompt
443
+ prompt_embeds = self._encode_prompt(
444
+ prompt,
445
+ device,
446
+ num_images_per_prompt,
447
+ do_classifier_free_guidance,
448
+ negative_prompt,
449
+ prompt_embeds=prompt_embeds,
450
+ negative_prompt_embeds=negative_prompt_embeds,
451
+ )
452
+
453
+ # 4. Prepare image
454
+ control_image = self.prepare_image(
455
+ control_image,
456
+ width,
457
+ height,
458
+ batch_size * num_images_per_prompt,
459
+ num_images_per_prompt,
460
+ device,
461
+ self.controlnet.dtype,
462
+ )
463
+
464
+ if do_classifier_free_guidance:
465
+ control_image = torch.cat([control_image] * 2)
466
+
467
+ # 5. Prepare timesteps
468
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
469
+ timesteps = self.scheduler.timesteps
470
+
471
+ # 6. Prepare latent variables
472
+ num_channels_latents = self.controlnet.in_channels
473
+ latents = self.prepare_latents(
474
+ batch_size * num_images_per_prompt,
475
+ num_channels_latents,
476
+ height,
477
+ width,
478
+ prompt_embeds.dtype,
479
+ device,
480
+ generator,
481
+ latents,
482
+ )
483
+
484
+ # EXTRA: prepare mask latents
485
+ mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
486
+ mask, masked_image_latents = self.prepare_mask_latents(
487
+ mask,
488
+ masked_image,
489
+ batch_size * num_images_per_prompt,
490
+ height,
491
+ width,
492
+ prompt_embeds.dtype,
493
+ device,
494
+ generator,
495
+ do_classifier_free_guidance,
496
+ )
497
+
498
+ # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
499
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
500
+
501
+ # 8. Denoising loop
502
+ num_warmup_steps = (
503
+ len(timesteps) - num_inference_steps * self.scheduler.order
504
+ )
505
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
506
+ for i, t in enumerate(timesteps):
507
+ # expand the latents if we are doing classifier free guidance
508
+ latent_model_input = (
509
+ torch.cat([latents] * 2)
510
+ if do_classifier_free_guidance
511
+ else latents
512
+ )
513
+ latent_model_input = self.scheduler.scale_model_input(
514
+ latent_model_input, t
515
+ )
516
+
517
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
518
+ latent_model_input,
519
+ t,
520
+ encoder_hidden_states=prompt_embeds,
521
+ controlnet_cond=control_image,
522
+ return_dict=False,
523
+ )
524
+
525
+ down_block_res_samples = [
526
+ down_block_res_sample * controlnet_conditioning_scale
527
+ for down_block_res_sample in down_block_res_samples
528
+ ]
529
+ mid_block_res_sample *= controlnet_conditioning_scale
530
+
531
+ # predict the noise residual
532
+ latent_model_input = torch.cat(
533
+ [latent_model_input, mask, masked_image_latents], dim=1
534
+ )
535
+ noise_pred = self.unet(
536
+ latent_model_input,
537
+ t,
538
+ encoder_hidden_states=prompt_embeds,
539
+ cross_attention_kwargs=cross_attention_kwargs,
540
+ down_block_additional_residuals=down_block_res_samples,
541
+ mid_block_additional_residual=mid_block_res_sample,
542
+ ).sample
543
+
544
+ # perform guidance
545
+ if do_classifier_free_guidance:
546
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
547
+ noise_pred = noise_pred_uncond + guidance_scale * (
548
+ noise_pred_text - noise_pred_uncond
549
+ )
550
+
551
+ # compute the previous noisy sample x_t -> x_t-1
552
+ latents = self.scheduler.step(
553
+ noise_pred, t, latents, **extra_step_kwargs
554
+ ).prev_sample
555
+
556
+ # call the callback, if provided
557
+ if i == len(timesteps) - 1 or (
558
+ (i + 1) > num_warmup_steps
559
+ and (i + 1) % self.scheduler.order == 0
560
+ ):
561
+ progress_bar.update()
562
+ if callback is not None and i % callback_steps == 0:
563
+ callback(i, t, latents)
564
+
565
+ # If we do sequential model offloading, let's offload unet and controlnet
566
+ # manually for max memory savings
567
+ if (
568
+ hasattr(self, "final_offload_hook")
569
+ and self.final_offload_hook is not None
570
+ ):
571
+ self.unet.to("cpu")
572
+ self.controlnet.to("cpu")
573
+ torch.cuda.empty_cache()
574
+
575
+ if output_type == "latent":
576
+ image = latents
577
+ has_nsfw_concept = None
578
+ elif output_type == "pil":
579
+ # 8. Post-processing
580
+ image = self.decode_latents(latents)
581
+
582
+ # 9. Run safety checker
583
+ image, has_nsfw_concept = self.run_safety_checker(
584
+ image, device, prompt_embeds.dtype
585
+ )
586
+
587
+ # 10. Convert to PIL
588
+ image = self.numpy_to_pil(image)
589
+ else:
590
+ # 8. Post-processing
591
+ image = self.decode_latents(latents)
592
+
593
+ # 9. Run safety checker
594
+ image, has_nsfw_concept = self.run_safety_checker(
595
+ image, device, prompt_embeds.dtype
596
+ )
597
+
598
+ # Offload last model to CPU
599
+ if (
600
+ hasattr(self, "final_offload_hook")
601
+ and self.final_offload_hook is not None
602
+ ):
603
+ self.final_offload_hook.offload()
604
+
605
+ if not return_dict:
606
+ return (image, has_nsfw_concept)
607
+
608
+ return StableDiffusionPipelineOutput(
609
+ images=image, nsfw_content_detected=has_nsfw_concept
610
+ )
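
What makes this subclass an inpainting pipeline is the channel layout fed to the UNet: the ControlNet branch still sees the plain 4-channel latents, while the denoising UNet of an inpainting checkpoint receives those latents concatenated with the 1-channel downscaled mask and the 4-channel masked-image latents. A runnable shape sketch for one 512x512 image with classifier-free guidance (batch doubled), assuming the usual SD 1.x layout of 4 latent channels and a VAE scale factor of 8:

```python
# Shape walk-through of the tensors concatenated in __call__ (values are dummies).
import torch

latents = torch.randn(2, 4, 64, 64)                  # noisy latents; also the ControlNet input
mask = torch.randint(0, 2, (2, 1, 64, 64)).float()   # binarized mask, resized to latent resolution
masked_image_latents = torch.randn(2, 4, 64, 64)     # VAE encoding of image * (mask < 0.5)

unet_input = torch.cat([latents, mask, masked_image_latents], dim=1)
print(unet_input.shape)  # torch.Size([2, 9, 64, 64]) -- the 9-channel input an inpainting UNet expects
```
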
diffusion_webui/diffusion_models/controlnet/controlnet_mlsd.py ADDED
@@ -0,0 +1,173 @@
1
+ import gradio as gr
2
+ import torch
3
+ from controlnet_aux import MLSDdetector
4
+ from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
5
+ from PIL import Image
6
+
7
+ from diffusion_webui.utils.model_list import stable_model_list
8
+ from diffusion_webui.utils.scheduler_list import (
9
+ SCHEDULER_LIST,
10
+ get_scheduler_list,
11
+ )
12
+
13
+
14
+ class StableDiffusionControlNetMLSDGenerator:
15
+ def __init__(self):
16
+ self.pipe = None
17
+
18
+ def load_model(self, stable_model_path, controlnet_model_path, scheduler):
19
+ if self.pipe is None:
20
+ controlnet = ControlNetModel.from_pretrained(
21
+ controlnet_model_path, torch_dtype=torch.float16
22
+ )
23
+
24
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
25
+ pretrained_model_name_or_path=stable_model_path,
26
+ controlnet=controlnet,
27
+ safety_checker=None,
28
+ torch_dtype=torch.float16,
29
+ )
30
+
31
+ self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
32
+ self.pipe.to("cuda")
33
+ self.pipe.enable_xformers_memory_efficient_attention()
34
+
35
+ return self.pipe
36
+
37
+ def controlnet_mlsd(self, image_path: str):
38
+ mlsd = MLSDdetector.from_pretrained("lllyasviel/ControlNet")
39
+
40
+ image = Image.open(image_path)
41
+ image = mlsd(image)
42
+
43
+ return image
44
+
45
+ def generate_image(
46
+ self,
47
+ image_path: str,
48
+ model_path: str,
49
+ prompt: str,
50
+ negative_prompt: str,
51
+ num_images_per_prompt: int,
52
+ guidance_scale: int,
53
+ num_inference_step: int,
54
+ scheduler: str,
55
+ seed_generator: int,
56
+ ):
57
+ image = self.controlnet_mlsd(image_path=image_path)
58
+
59
+ pipe = self.load_model(
60
+ stable_model_path=model_path,
61
+ controlnet_model_path="lllyasviel/sd-controlnet-mlsd",
62
+ scheduler=scheduler,
63
+ )
64
+
65
+ if seed_generator == 0:
66
+ random_seed = torch.randint(0, 1000000, (1,))
67
+ generator = torch.manual_seed(random_seed)
68
+ else:
69
+ generator = torch.manual_seed(seed_generator)
70
+
71
+ output = pipe(
72
+ prompt=prompt,
73
+ image=image,
74
+ negative_prompt=negative_prompt,
75
+ num_images_per_prompt=num_images_per_prompt,
76
+ num_inference_steps=num_inference_step,
77
+ guidance_scale=guidance_scale,
78
+ generator=generator,
79
+ ).images
80
+
81
+ return output
82
+
83
+ def app():
84
+ with gr.Blocks():
85
+ with gr.Row():
86
+ with gr.Column():
87
+ controlnet_mlsd_image_file = gr.Image(
88
+ type="filepath", label="Image"
89
+ )
90
+
91
+ controlnet_mlsd_prompt = gr.Textbox(
92
+ lines=1,
93
+ show_label=False,
94
+ placeholder="Prompt",
95
+ )
96
+
97
+ controlnet_mlsd_negative_prompt = gr.Textbox(
98
+ lines=1,
99
+ show_label=False,
100
+ placeholder="Negative Prompt",
101
+ )
102
+
103
+ with gr.Row():
104
+ with gr.Column():
105
+ controlnet_mlsd_model_id = gr.Dropdown(
106
+ choices=stable_model_list,
107
+ value=stable_model_list[0],
108
+ label="Stable Model Id",
109
+ )
110
+ controlnet_mlsd_guidance_scale = gr.Slider(
111
+ minimum=0.1,
112
+ maximum=15,
113
+ step=0.1,
114
+ value=7.5,
115
+ label="Guidance Scale",
116
+ )
117
+ controlnet_mlsd_num_inference_step = gr.Slider(
118
+ minimum=1,
119
+ maximum=100,
120
+ step=1,
121
+ value=50,
122
+ label="Num Inference Step",
123
+ )
124
+
125
+ with gr.Row():
126
+ with gr.Column():
127
+ controlnet_mlsd_scheduler = gr.Dropdown(
128
+ choices=SCHEDULER_LIST,
129
+ value=SCHEDULER_LIST[0],
130
+ label="Scheduler",
131
+ )
132
+
133
+ controlnet_mlsd_seed_generator = gr.Slider(
134
+ minimum=0,
135
+ maximum=1000000,
136
+ step=1,
137
+ value=0,
138
+ label="Seed Generator",
139
+ )
140
+ controlnet_mlsd_num_images_per_prompt = (
141
+ gr.Slider(
142
+ minimum=1,
143
+ maximum=10,
144
+ step=1,
145
+ value=1,
146
+ label="Number Of Images",
147
+ )
148
+ )
149
+
150
+ controlnet_mlsd_predict = gr.Button(value="Generator")
151
+
152
+ with gr.Column():
153
+ output_image = gr.Gallery(
154
+ label="Generated images",
155
+ show_label=False,
156
+ elem_id="gallery",
157
+ ).style(grid=(1, 2))
158
+
159
+ controlnet_mlsd_predict.click(
160
+ fn=StableDiffusionControlNetMLSDGenerator().generate_image,
161
+ inputs=[
162
+ controlnet_mlsd_image_file,
163
+ controlnet_mlsd_model_id,
164
+ controlnet_mlsd_prompt,
165
+ controlnet_mlsd_negative_prompt,
166
+ controlnet_mlsd_num_images_per_prompt,
167
+ controlnet_mlsd_guidance_scale,
168
+ controlnet_mlsd_num_inference_step,
169
+ controlnet_mlsd_scheduler,
170
+ controlnet_mlsd_seed_generator,
171
+ ],
172
+ outputs=output_image,
173
+ )
diffusion_webui/diffusion_models/controlnet/controlnet_pose.py ADDED
@@ -0,0 +1,189 @@
1
+ import gradio as gr
2
+ import torch
3
+ from controlnet_aux import OpenposeDetector
4
+ from diffusers import (
5
+ ControlNetModel,
6
+ StableDiffusionControlNetPipeline,
7
+ UniPCMultistepScheduler,
8
+ )
9
+ from PIL import Image
10
+
11
+ from diffusion_webui.utils.model_list import (
12
+ controlnet_pose_model_list,
13
+ stable_model_list,
14
+ )
15
+ from diffusion_webui.utils.scheduler_list import (
16
+ SCHEDULER_LIST,
17
+ get_scheduler_list,
18
+ )
19
+
20
+
21
+ class StableDiffusionControlNetPoseGenerator:
22
+ def __init__(self):
23
+ self.pipe = None
24
+
25
+ def load_model(self, stable_model_path, controlnet_model_path, scheduler):
26
+ if self.pipe is None:
27
+ controlnet = ControlNetModel.from_pretrained(
28
+ controlnet_model_path, torch_dtype=torch.float16
29
+ )
30
+
31
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
32
+ pretrained_model_name_or_path=stable_model_path,
33
+ controlnet=controlnet,
34
+ safety_checker=None,
35
+ torch_dtype=torch.float16,
36
+ )
37
+
38
+ self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
39
+ self.pipe.to("cuda")
40
+ self.pipe.enable_xformers_memory_efficient_attention()
41
+
42
+ return self.pipe
43
+
44
+ def controlnet_pose(self, image_path: str):
45
+ openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
46
+
47
+ image = Image.open(image_path)
48
+ image = openpose(image)
49
+
50
+ return image
51
+
52
+ def generate_image(
53
+ self,
54
+ image_path: str,
55
+ stable_model_path: str,
56
+ controlnet_pose_model_path: str,
57
+ prompt: str,
58
+ negative_prompt: str,
59
+ num_images_per_prompt: int,
60
+ guidance_scale: int,
61
+ num_inference_step: int,
62
+ scheduler: str,
63
+ seed_generator: int,
64
+ ):
65
+
66
+ image = self.controlnet_pose(image_path=image_path)
67
+
68
+ pipe = self.load_model(
69
+ stable_model_path=stable_model_path,
70
+ controlnet_model_path=controlnet_pose_model_path,
71
+ scheduler=scheduler,
72
+ )
73
+
74
+ if seed_generator == 0:
75
+ random_seed = torch.randint(0, 1000000, (1,))
76
+ generator = torch.manual_seed(random_seed)
77
+ else:
78
+ generator = torch.manual_seed(seed_generator)
79
+
80
+ output = pipe(
81
+ prompt=prompt,
82
+ image=image,
83
+ negative_prompt=negative_prompt,
84
+ num_images_per_prompt=num_images_per_prompt,
85
+ num_inference_steps=num_inference_step,
86
+ guidance_scale=guidance_scale,
87
+ generator=generator,
88
+ ).images
89
+
90
+ return output
91
+
92
+ def app():
93
+ with gr.Blocks():
94
+ with gr.Row():
95
+ with gr.Column():
96
+ controlnet_pose_image_file = gr.Image(
97
+ type="filepath", label="Image"
98
+ )
99
+
100
+ controlnet_pose_prompt = gr.Textbox(
101
+ lines=1,
102
+ show_label=False,
103
+ placeholder="Prompt",
104
+ )
105
+
106
+ controlnet_pose_negative_prompt = gr.Textbox(
107
+ lines=1,
108
+ show_label=False,
109
+ placeholder="Negative Prompt",
110
+ )
111
+
112
+ with gr.Row():
113
+ with gr.Column():
114
+ controlnet_pose_stable_model_id = gr.Dropdown(
115
+ choices=stable_model_list,
116
+ value=stable_model_list[0],
117
+ label="Stable Model Id",
118
+ )
119
+ controlnet_pose_guidance_scale = gr.Slider(
120
+ minimum=0.1,
121
+ maximum=15,
122
+ step=0.1,
123
+ value=7.5,
124
+ label="Guidance Scale",
125
+ )
126
+
127
+ controlnet_pose_num_inference_step = gr.Slider(
128
+ minimum=1,
129
+ maximum=100,
130
+ step=1,
131
+ value=50,
132
+ label="Num Inference Step",
133
+ )
134
+
135
+ controlnet_pose_num_images_per_prompt = gr.Slider(
136
+ minimum=1,
137
+ maximum=10,
138
+ step=1,
139
+ value=1,
140
+ label="Number Of Images",
141
+ )
142
+
143
+ with gr.Row():
144
+ with gr.Column():
145
+ controlnet_pose_model_id = gr.Dropdown(
146
+ choices=controlnet_pose_model_list,
147
+ value=controlnet_pose_model_list[0],
148
+ label="ControlNet Model Id",
149
+ )
150
+
151
+ controlnet_pose_scheduler = gr.Dropdown(
152
+ choices=SCHEDULER_LIST,
153
+ value=SCHEDULER_LIST[0],
154
+ label="Scheduler",
155
+ )
156
+
157
+ controlnet_pose_seed_generator = gr.Number(
158
+ minimum=0,
159
+ maximum=1000000,
160
+ step=1,
161
+ value=0,
162
+ label="Seed Generator",
163
+ )
164
+
165
+ controlnet_pose_predict = gr.Button(value="Generator")
166
+
167
+ with gr.Column():
168
+ output_image = gr.Gallery(
169
+ label="Generated images",
170
+ show_label=False,
171
+ elem_id="gallery",
172
+ ).style(grid=(1, 2))
173
+
174
+ controlnet_pose_predict.click(
175
+ fn=StableDiffusionControlNetPoseGenerator().generate_image,
176
+ inputs=[
177
+ controlnet_pose_image_file,
178
+ controlnet_pose_stable_model_id,
179
+ controlnet_pose_model_id,
180
+ controlnet_pose_prompt,
181
+ controlnet_pose_negative_prompt,
182
+ controlnet_pose_num_images_per_prompt,
183
+ controlnet_pose_guidance_scale,
184
+ controlnet_pose_num_inference_step,
185
+ controlnet_pose_scheduler,
186
+ controlnet_pose_seed_generator,
187
+ ],
188
+ outputs=output_image,
189
+ )
diffusion_webui/diffusion_models/controlnet/controlnet_scribble.py ADDED
@@ -0,0 +1,188 @@
1
+ import gradio as gr
2
+ import torch
3
+ from controlnet_aux import HEDdetector
4
+ from diffusers import (
5
+ ControlNetModel,
6
+ StableDiffusionControlNetPipeline,
7
+ UniPCMultistepScheduler,
8
+ )
9
+ from PIL import Image
10
+
11
+ from diffusion_webui.utils.model_list import (
12
+ controlnet_scribble_model_list,
13
+ stable_model_list,
14
+ )
15
+ from diffusion_webui.utils.scheduler_list import (
16
+ SCHEDULER_LIST,
17
+ get_scheduler_list,
18
+ )
19
+
20
+
21
+ class StableDiffusionControlNetScribbleGenerator:
22
+ def __init__(self):
23
+ self.pipe = None
24
+
25
+ def load_model(self, stable_model_path, controlnet_model_path, scheduler):
26
+ if self.pipe is None:
27
+ controlnet = ControlNetModel.from_pretrained(
28
+ controlnet_model_path, torch_dtype=torch.float16
29
+ )
30
+
31
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
32
+ pretrained_model_name_or_path=stable_model_path,
33
+ controlnet=controlnet,
34
+ safety_checker=None,
35
+ torch_dtype=torch.float16,
36
+ )
37
+
38
+ self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
39
+ self.pipe.to("cuda")
40
+ self.pipe.enable_xformers_memory_efficient_attention()
41
+
42
+ return self.pipe
43
+
44
+ def controlnet_scribble(self, image_path: str):
45
+ hed = HEDdetector.from_pretrained("lllyasviel/ControlNet")
46
+
47
+ image = Image.open(image_path)
48
+ image = hed(image, scribble=True)
49
+
50
+ return image
51
+
52
+ def generate_image(
53
+ self,
54
+ image_path: str,
55
+ stable_model_path: str,
56
+ controlnet_hed_model_path: str,
57
+ prompt: str,
58
+ negative_prompt: str,
59
+ num_images_per_prompt: int,
60
+ guidance_scale: int,
61
+ num_inference_step: int,
62
+ scheduler: str,
63
+ seed_generator: int,
64
+ ):
65
+
66
+ image = self.controlnet_scribble(image_path=image_path)
67
+
68
+ pipe = self.load_model(
69
+ stable_model_path=stable_model_path,
70
+ controlnet_model_path=controlnet_hed_model_path,
71
+ scheduler=scheduler,
72
+ )
73
+ if seed_generator == 0:
74
+ random_seed = torch.randint(0, 1000000, (1,))
75
+ generator = torch.manual_seed(random_seed)
76
+ else:
77
+ generator = torch.manual_seed(seed_generator)
78
+
79
+ output = pipe(
80
+ prompt=prompt,
81
+ image=image,
82
+ negative_prompt=negative_prompt,
83
+ num_images_per_prompt=num_images_per_prompt,
84
+ num_inference_steps=num_inference_step,
85
+ guidance_scale=guidance_scale,
86
+ generator=generator,
87
+ ).images
88
+
89
+ return output
90
+
91
+ def app():
92
+ with gr.Blocks():
93
+ with gr.Row():
94
+ with gr.Column():
95
+ controlnet_scribble_image_file = gr.Image(
96
+ type="filepath", label="Image"
97
+ )
98
+ controlnet_scribble_prompt = gr.Textbox(
99
+ lines=1,
100
+ show_label=False,
101
+ placeholder="Prompt",
102
+ )
103
+
104
+ controlnet_scribble_negative_prompt = gr.Textbox(
105
+ lines=1,
106
+ show_label=False,
107
+ placeholder="Negative Prompt",
108
+ )
109
+
110
+ with gr.Row():
111
+ with gr.Column():
112
+ controlnet_scribble_stable_model_id = gr.Dropdown(
113
+ choices=stable_model_list,
114
+ value=stable_model_list[0],
115
+ label="Stable Model Id",
116
+ )
117
+ controlnet_scribble_guidance_scale = gr.Slider(
118
+ minimum=0.1,
119
+ maximum=15,
120
+ step=0.1,
121
+ value=7.5,
122
+ label="Guidance Scale",
123
+ )
124
+
125
+ controlnet_scribble_num_inference_step = gr.Slider(
126
+ minimum=1,
127
+ maximum=100,
128
+ step=1,
129
+ value=50,
130
+ label="Num Inference Step",
131
+ )
132
+
133
+ controlnet_scribble_num_images_per_prompt = (
134
+ gr.Slider(
135
+ minimum=1,
136
+ maximum=10,
137
+ step=1,
138
+ value=1,
139
+ label="Number Of Images",
140
+ )
141
+ )
142
+ with gr.Row():
143
+ with gr.Column():
144
+ controlnet_scribble_model_id = gr.Dropdown(
145
+ choices=controlnet_scribble_model_list,
146
+ value=controlnet_scribble_model_list[0],
147
+ label="ControlNet Model Id",
148
+ )
149
+
150
+ controlnet_scribble_scheduler = gr.Dropdown(
151
+ choices=SCHEDULER_LIST,
152
+ value=SCHEDULER_LIST[0],
153
+ label="Scheduler",
154
+ )
155
+
156
+ controlnet_scribble_seed_generator = gr.Number(
157
+ minimum=0,
158
+ maximum=1000000,
159
+ step=1,
160
+ value=0,
161
+ label="Seed Generator",
162
+ )
163
+
164
+ controlnet_scribble_predict = gr.Button(value="Generator")
165
+
166
+ with gr.Column():
167
+ output_image = gr.Gallery(
168
+ label="Generated images",
169
+ show_label=False,
170
+ elem_id="gallery",
171
+ ).style(grid=(1, 2))
172
+
173
+ controlnet_scribble_predict.click(
174
+ fn=StableDiffusionControlNetScribbleGenerator().generate_image,
175
+ inputs=[
176
+ controlnet_scribble_image_file,
177
+ controlnet_scribble_stable_model_id,
178
+ controlnet_scribble_model_id,
179
+ controlnet_scribble_prompt,
180
+ controlnet_scribble_negative_prompt,
181
+ controlnet_scribble_num_images_per_prompt,
182
+ controlnet_scribble_guidance_scale,
183
+ controlnet_scribble_num_inference_step,
184
+ controlnet_scribble_scheduler,
185
+ controlnet_scribble_seed_generator,
186
+ ],
187
+ outputs=output_image,
188
+ )
diffusion_webui/diffusion_models/controlnet/controlnet_seg.py ADDED
@@ -0,0 +1,353 @@
1
+ import gradio as gr
2
+ import numpy as np
3
+ import torch
4
+ from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
5
+ from PIL import Image
6
+ from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
7
+
8
+ from diffusion_webui.utils.model_list import stable_model_list
9
+ from diffusion_webui.utils.scheduler_list import (
10
+ SCHEDULER_LIST,
11
+ get_scheduler_list,
12
+ )
13
+
14
+
15
+ def ade_palette():
16
+ """ADE20K palette that maps each class to RGB values."""
17
+ return [
18
+ [120, 120, 120],
19
+ [180, 120, 120],
20
+ [6, 230, 230],
21
+ [80, 50, 50],
22
+ [4, 200, 3],
23
+ [120, 120, 80],
24
+ [140, 140, 140],
25
+ [204, 5, 255],
26
+ [230, 230, 230],
27
+ [4, 250, 7],
28
+ [224, 5, 255],
29
+ [235, 255, 7],
30
+ [150, 5, 61],
31
+ [120, 120, 70],
32
+ [8, 255, 51],
33
+ [255, 6, 82],
34
+ [143, 255, 140],
35
+ [204, 255, 4],
36
+ [255, 51, 7],
37
+ [204, 70, 3],
38
+ [0, 102, 200],
39
+ [61, 230, 250],
40
+ [255, 6, 51],
41
+ [11, 102, 255],
42
+ [255, 7, 71],
43
+ [255, 9, 224],
44
+ [9, 7, 230],
45
+ [220, 220, 220],
46
+ [255, 9, 92],
47
+ [112, 9, 255],
48
+ [8, 255, 214],
49
+ [7, 255, 224],
50
+ [255, 184, 6],
51
+ [10, 255, 71],
52
+ [255, 41, 10],
53
+ [7, 255, 255],
54
+ [224, 255, 8],
55
+ [102, 8, 255],
56
+ [255, 61, 6],
57
+ [255, 194, 7],
58
+ [255, 122, 8],
59
+ [0, 255, 20],
60
+ [255, 8, 41],
61
+ [255, 5, 153],
62
+ [6, 51, 255],
63
+ [235, 12, 255],
64
+ [160, 150, 20],
65
+ [0, 163, 255],
66
+ [140, 140, 140],
67
+ [250, 10, 15],
68
+ [20, 255, 0],
69
+ [31, 255, 0],
70
+ [255, 31, 0],
71
+ [255, 224, 0],
72
+ [153, 255, 0],
73
+ [0, 0, 255],
74
+ [255, 71, 0],
75
+ [0, 235, 255],
76
+ [0, 173, 255],
77
+ [31, 0, 255],
78
+ [11, 200, 200],
79
+ [255, 82, 0],
80
+ [0, 255, 245],
81
+ [0, 61, 255],
82
+ [0, 255, 112],
83
+ [0, 255, 133],
84
+ [255, 0, 0],
85
+ [255, 163, 0],
86
+ [255, 102, 0],
87
+ [194, 255, 0],
88
+ [0, 143, 255],
89
+ [51, 255, 0],
90
+ [0, 82, 255],
91
+ [0, 255, 41],
92
+ [0, 255, 173],
93
+ [10, 0, 255],
94
+ [173, 255, 0],
95
+ [0, 255, 153],
96
+ [255, 92, 0],
97
+ [255, 0, 255],
98
+ [255, 0, 245],
99
+ [255, 0, 102],
100
+ [255, 173, 0],
101
+ [255, 0, 20],
102
+ [255, 184, 184],
103
+ [0, 31, 255],
104
+ [0, 255, 61],
105
+ [0, 71, 255],
106
+ [255, 0, 204],
107
+ [0, 255, 194],
108
+ [0, 255, 82],
109
+ [0, 10, 255],
110
+ [0, 112, 255],
111
+ [51, 0, 255],
112
+ [0, 194, 255],
113
+ [0, 122, 255],
114
+ [0, 255, 163],
115
+ [255, 153, 0],
116
+ [0, 255, 10],
117
+ [255, 112, 0],
118
+ [143, 255, 0],
119
+ [82, 0, 255],
120
+ [163, 255, 0],
121
+ [255, 235, 0],
122
+ [8, 184, 170],
123
+ [133, 0, 255],
124
+ [0, 255, 92],
125
+ [184, 0, 255],
126
+ [255, 0, 31],
127
+ [0, 184, 255],
128
+ [0, 214, 255],
129
+ [255, 0, 112],
130
+ [92, 255, 0],
131
+ [0, 224, 255],
132
+ [112, 224, 255],
133
+ [70, 184, 160],
134
+ [163, 0, 255],
135
+ [153, 0, 255],
136
+ [71, 255, 0],
137
+ [255, 0, 163],
138
+ [255, 204, 0],
139
+ [255, 0, 143],
140
+ [0, 255, 235],
141
+ [133, 255, 0],
142
+ [255, 0, 235],
143
+ [245, 0, 255],
144
+ [255, 0, 122],
145
+ [255, 245, 0],
146
+ [10, 190, 212],
147
+ [214, 255, 0],
148
+ [0, 204, 255],
149
+ [20, 0, 255],
150
+ [255, 255, 0],
151
+ [0, 153, 255],
152
+ [0, 41, 255],
153
+ [0, 255, 204],
154
+ [41, 0, 255],
155
+ [41, 255, 0],
156
+ [173, 0, 255],
157
+ [0, 245, 255],
158
+ [71, 0, 255],
159
+ [122, 0, 255],
160
+ [0, 255, 184],
161
+ [0, 92, 255],
162
+ [184, 255, 0],
163
+ [0, 133, 255],
164
+ [255, 214, 0],
165
+ [25, 194, 194],
166
+ [102, 255, 0],
167
+ [92, 0, 255],
168
+ ]
169
+
170
+
171
+ class StableDiffusionControlNetSegGenerator:
172
+ def __init__(self):
173
+ self.pipe = None
174
+
175
+ def load_model(
176
+ self,
177
+ stable_model_path,
178
+ scheduler,
179
+ ):
180
+
181
+ if self.pipe is None:
182
+ controlnet = ControlNetModel.from_pretrained(
183
+ "lllyasviel/sd-controlnet-seg", torch_dtype=torch.float16
184
+ )
185
+ self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
186
+ pretrained_model_name_or_path=stable_model_path,
187
+ controlnet=controlnet,
188
+ safety_checker=None,
189
+ torch_dtype=torch.float16,
190
+ )
191
+
192
+ self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
193
+ self.pipe.to("cuda")
194
+ self.pipe.enable_xformers_memory_efficient_attention()
195
+
196
+ return self.pipe
197
+
198
+ def controlnet_seg(self, image_path: str):
199
+ image_processor = AutoImageProcessor.from_pretrained(
200
+ "openmmlab/upernet-convnext-small"
201
+ )
202
+ image_segmentor = UperNetForSemanticSegmentation.from_pretrained(
203
+ "openmmlab/upernet-convnext-small"
204
+ )
205
+
206
+ image = Image.open(image_path).convert("RGB")
207
+ pixel_values = image_processor(image, return_tensors="pt").pixel_values
208
+
209
+ with torch.no_grad():
210
+ outputs = image_segmentor(pixel_values)
211
+
212
+ seg = image_processor.post_process_semantic_segmentation(
213
+ outputs, target_sizes=[image.size[::-1]]
214
+ )[0]
215
+
216
+ color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
217
+ palette = np.array(ade_palette())
218
+
219
+ for label, color in enumerate(palette):
220
+ color_seg[seg == label, :] = color
221
+
222
+ color_seg = color_seg.astype(np.uint8)
223
+ image = Image.fromarray(color_seg)
224
+
225
+ return image
226
+
227
+ def generate_image(
228
+ self,
229
+ image_path: str,
230
+ model_path: str,
231
+ prompt: str,
232
+ negative_prompt: str,
233
+ num_images_per_prompt: int,
234
+ guidance_scale: int,
235
+ num_inference_step: int,
236
+ scheduler: str,
237
+ seed_generator: int,
238
+ ):
239
+
240
+ image = self.controlnet_seg(image_path=image_path)
241
+ pipe = self.load_model(
242
+ stable_model_path=model_path,
243
+ scheduler=scheduler,
244
+ )
245
+ if seed_generator == 0:
246
+ random_seed = torch.randint(0, 1000000, (1,))
247
+ generator = torch.manual_seed(random_seed)
248
+ else:
249
+ generator = torch.manual_seed(seed_generator)
250
+
251
+ output = pipe(
252
+ prompt=prompt,
253
+ image=image,
254
+ negative_prompt=negative_prompt,
255
+ num_images_per_prompt=num_images_per_prompt,
256
+ num_inference_steps=num_inference_step,
257
+ guidance_scale=guidance_scale,
258
+ generator=generator,
259
+ ).images
260
+
261
+ return output
262
+
263
+ def app():
264
+ with gr.Blocks():
265
+ with gr.Row():
266
+ with gr.Column():
267
+ controlnet_seg_image_file = gr.Image(
268
+ type="filepath", label="Image"
269
+ )
270
+
271
+ controlnet_seg_prompt = gr.Textbox(
272
+ lines=1,
273
+ show_label=False,
274
+ placeholder="Prompt",
275
+ )
276
+
277
+ controlnet_seg_negative_prompt = gr.Textbox(
278
+ lines=1,
279
+ show_label=False,
280
+ placeholder="Negative Prompt",
281
+ )
282
+
283
+ with gr.Row():
284
+ with gr.Column():
285
+ controlnet_seg_model_id = gr.Dropdown(
286
+ choices=stable_model_list,
287
+ value=stable_model_list[0],
288
+ label="Stable Model Id",
289
+ )
290
+ controlnet_seg_guidance_scale = gr.Slider(
291
+ minimum=0.1,
292
+ maximum=15,
293
+ step=0.1,
294
+ value=7.5,
295
+ label="Guidance Scale",
296
+ )
297
+
298
+ controlnet_seg_num_inference_step = gr.Slider(
299
+ minimum=1,
300
+ maximum=100,
301
+ step=1,
302
+ value=50,
303
+ label="Num Inference Step",
304
+ )
305
+
306
+ with gr.Row():
307
+ with gr.Column():
308
+ controlnet_seg_scheduler = gr.Dropdown(
309
+ choices=SCHEDULER_LIST,
310
+ value=SCHEDULER_LIST[0],
311
+ label="Scheduler",
312
+ )
313
+ controlnet_seg_num_images_per_prompt = (
314
+ gr.Slider(
315
+ minimum=1,
316
+ maximum=10,
317
+ step=1,
318
+ value=1,
319
+ label="Number Of Images",
320
+ )
321
+ )
322
+ controlnet_seg_seed_generator = gr.Slider(
323
+ minimum=0,
324
+ maximum=1000000,
325
+ step=1,
326
+ value=0,
327
+ label="Seed Generator",
328
+ )
329
+
330
+ controlnet_seg_predict = gr.Button(value="Generator")
331
+
332
+ with gr.Column():
333
+ output_image = gr.Gallery(
334
+ label="Generated images",
335
+ show_label=False,
336
+ elem_id="gallery",
337
+ ).style(grid=(1, 2))
338
+
339
+ controlnet_seg_predict.click(
340
+ fn=StableDiffusionControlNetSegGenerator().generate_image,
341
+ inputs=[
342
+ controlnet_seg_image_file,
343
+ controlnet_seg_model_id,
344
+ controlnet_seg_prompt,
345
+ controlnet_seg_negative_prompt,
346
+ controlnet_seg_num_images_per_prompt,
347
+ controlnet_seg_guidance_scale,
348
+ controlnet_seg_num_inference_step,
349
+ controlnet_seg_scheduler,
350
+ controlnet_seg_seed_generator,
351
+ ],
352
+ outputs=[output_image],
353
+ )
diffusion_webui/diffusion_models/stable_diffusion/__init__.py ADDED
File without changes
diffusion_webui/diffusion_models/stable_diffusion/img2img_app.py ADDED
@@ -0,0 +1,153 @@
+ import gradio as gr
+ import torch
+ from diffusers import StableDiffusionImg2ImgPipeline
+ from PIL import Image
+
+ from diffusion_webui.utils.model_list import stable_model_list
+ from diffusion_webui.utils.scheduler_list import (
+     SCHEDULER_LIST,
+     get_scheduler_list,
+ )
+
+
+ class StableDiffusionImage2ImageGenerator:
+     def __init__(self):
+         self.pipe = None
+
+     def load_model(self, model_path, scheduler):
+         if self.pipe is None:
+             self.pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
+                 model_path, safety_checker=None, torch_dtype=torch.float16
+             )
+
+         self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
+         self.pipe.to("cuda")
+         self.pipe.enable_xformers_memory_efficient_attention()
+
+         return self.pipe
+
+     def generate_image(
+         self,
+         image_path: str,
+         model_path: str,
+         prompt: str,
+         negative_prompt: str,
+         num_images_per_prompt: int,
+         scheduler: str,
+         guidance_scale: float,
+         num_inference_step: int,
+         seed_generator=0,
+     ):
+         pipe = self.load_model(
+             model_path=model_path,
+             scheduler=scheduler,
+         )
+
+         if seed_generator == 0:
+             random_seed = torch.randint(0, 1000000, (1,))
+             generator = torch.manual_seed(random_seed)
+         else:
+             generator = torch.manual_seed(seed_generator)
+
+         image = Image.open(image_path)
+         images = pipe(
+             prompt,
+             image=image,
+             negative_prompt=negative_prompt,
+             num_images_per_prompt=num_images_per_prompt,
+             num_inference_steps=num_inference_step,
+             guidance_scale=guidance_scale,
+             generator=generator,
+         ).images
+
+         return images
+
+     def app():
+         with gr.Blocks():
+             with gr.Row():
+                 with gr.Column():
+                     image2image_image_file = gr.Image(
+                         type="filepath", label="Image"
+                     ).style(height=260)
+
+                     image2image_prompt = gr.Textbox(
+                         lines=1,
+                         placeholder="Prompt",
+                         show_label=False,
+                     )
+
+                     image2image_negative_prompt = gr.Textbox(
+                         lines=1,
+                         placeholder="Negative Prompt",
+                         show_label=False,
+                     )
+
+                     with gr.Row():
+                         with gr.Column():
+                             image2image_model_path = gr.Dropdown(
+                                 choices=stable_model_list,
+                                 value=stable_model_list[0],
+                                 label="Stable Model Id",
+                             )
+
+                             image2image_guidance_scale = gr.Slider(
+                                 minimum=0.1,
+                                 maximum=15,
+                                 step=0.1,
+                                 value=7.5,
+                                 label="Guidance Scale",
+                             )
+                             image2image_num_inference_step = gr.Slider(
+                                 minimum=1,
+                                 maximum=100,
+                                 step=1,
+                                 value=50,
+                                 label="Num Inference Step",
+                             )
+                         with gr.Row():
+                             with gr.Column():
+                                 image2image_scheduler = gr.Dropdown(
+                                     choices=SCHEDULER_LIST,
+                                     value=SCHEDULER_LIST[0],
+                                     label="Scheduler",
+                                 )
+                                 image2image_num_images_per_prompt = gr.Slider(
+                                     minimum=1,
+                                     maximum=30,
+                                     step=1,
+                                     value=1,
+                                     label="Number Of Images",
+                                 )
+
+                                 image2image_seed_generator = gr.Slider(
+                                     minimum=0,
+                                     maximum=1000000,
+                                     step=1,
+                                     value=0,
+                                     label="Seed(0 for random)",
+                                 )
+
+                     image2image_predict_button = gr.Button(value="Generator")
+
+                 with gr.Column():
+                     output_image = gr.Gallery(
+                         label="Generated images",
+                         show_label=False,
+                         elem_id="gallery",
+                     ).style(grid=(1, 2))
+
+             image2image_predict_button.click(
+                 fn=StableDiffusionImage2ImageGenerator().generate_image,
+                 inputs=[
+                     image2image_image_file,
+                     image2image_model_path,
+                     image2image_prompt,
+                     image2image_negative_prompt,
+                     image2image_num_images_per_prompt,
+                     image2image_scheduler,
+                     image2image_guidance_scale,
+                     image2image_num_inference_step,
+                     image2image_seed_generator,
+                 ],
+                 outputs=[output_image],
+             )
diffusion_webui/diffusion_models/stable_diffusion/inpaint_app.py ADDED
@@ -0,0 +1,148 @@
+ import gradio as gr
+ import torch
+ from diffusers import DiffusionPipeline
+
+ from diffusion_webui.utils.model_list import stable_inpiant_model_list
+
+
+ class StableDiffusionInpaintGenerator:
+     def __init__(self):
+         self.pipe = None
+
+     def load_model(self, model_path):
+         if self.pipe is None:
+             self.pipe = DiffusionPipeline.from_pretrained(
+                 model_path, revision="fp16", torch_dtype=torch.float16
+             )
+
+         self.pipe.to("cuda")
+         self.pipe.enable_xformers_memory_efficient_attention()
+
+         return self.pipe
+
+     def generate_image(
+         self,
+         pil_image: str,
+         model_path: str,
+         prompt: str,
+         negative_prompt: str,
+         num_images_per_prompt: int,
+         guidance_scale: float,
+         num_inference_step: int,
+         seed_generator=0,
+     ):
+         image = pil_image["image"].convert("RGB").resize((512, 512))
+         mask_image = pil_image["mask"].convert("RGB").resize((512, 512))
+         pipe = self.load_model(model_path)
+
+         if seed_generator == 0:
+             random_seed = torch.randint(0, 1000000, (1,))
+             generator = torch.manual_seed(random_seed)
+         else:
+             generator = torch.manual_seed(seed_generator)
+
+         output = pipe(
+             prompt=prompt,
+             image=image,
+             mask_image=mask_image,
+             negative_prompt=negative_prompt,
+             num_images_per_prompt=num_images_per_prompt,
+             num_inference_steps=num_inference_step,
+             guidance_scale=guidance_scale,
+             generator=generator,
+         ).images
+
+         return output
+
+     def app():
+         with gr.Blocks():
+             with gr.Row():
+                 with gr.Column():
+                     stable_diffusion_inpaint_image_file = gr.Image(
+                         source="upload",
+                         tool="sketch",
+                         elem_id="image_upload",
+                         type="pil",
+                         label="Upload",
+                     ).style(height=260)
+
+                     stable_diffusion_inpaint_prompt = gr.Textbox(
+                         lines=1,
+                         placeholder="Prompt",
+                         show_label=False,
+                     )
+
+                     stable_diffusion_inpaint_negative_prompt = gr.Textbox(
+                         lines=1,
+                         placeholder="Negative Prompt",
+                         show_label=False,
+                     )
+                     stable_diffusion_inpaint_model_id = gr.Dropdown(
+                         choices=stable_inpiant_model_list,
+                         value=stable_inpiant_model_list[0],
+                         label="Inpaint Model Id",
+                     )
+                     with gr.Row():
+                         with gr.Column():
+                             stable_diffusion_inpaint_guidance_scale = gr.Slider(
+                                 minimum=0.1,
+                                 maximum=15,
+                                 step=0.1,
+                                 value=7.5,
+                                 label="Guidance Scale",
+                             )
+
+                             stable_diffusion_inpaint_num_inference_step = (
+                                 gr.Slider(
+                                     minimum=1,
+                                     maximum=100,
+                                     step=1,
+                                     value=50,
+                                     label="Num Inference Step",
+                                 )
+                             )
+
+                         with gr.Row():
+                             with gr.Column():
+                                 stable_diffusion_inpiant_num_images_per_prompt = gr.Slider(
+                                     minimum=1,
+                                     maximum=10,
+                                     step=1,
+                                     value=1,
+                                     label="Number Of Images",
+                                 )
+                                 stable_diffusion_inpaint_seed_generator = (
+                                     gr.Slider(
+                                         minimum=0,
+                                         maximum=1000000,
+                                         step=1,
+                                         value=0,
+                                         label="Seed(0 for random)",
+                                     )
+                                 )
+
+                     stable_diffusion_inpaint_predict = gr.Button(
+                         value="Generator"
+                     )
+
+                 with gr.Column():
+                     output_image = gr.Gallery(
+                         label="Generated images",
+                         show_label=False,
+                         elem_id="gallery",
+                     ).style(grid=(1, 2))
+
+             stable_diffusion_inpaint_predict.click(
+                 fn=StableDiffusionInpaintGenerator().generate_image,
+                 inputs=[
+                     stable_diffusion_inpaint_image_file,
+                     stable_diffusion_inpaint_model_id,
+                     stable_diffusion_inpaint_prompt,
+                     stable_diffusion_inpaint_negative_prompt,
+                     stable_diffusion_inpiant_num_images_per_prompt,
+                     stable_diffusion_inpaint_guidance_scale,
+                     stable_diffusion_inpaint_num_inference_step,
+                     stable_diffusion_inpaint_seed_generator,
+                 ],
+                 outputs=[output_image],
+             )
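Note on the input format: generate_image expects the dict produced by gr.Image(tool="sketch"), with "image" and "mask" keys. A minimal programmatic sketch outside the Gradio UI, assuming a CUDA GPU and two placeholder PNG paths:

    from PIL import Image

    pil_image = {
        "image": Image.open("photo.png"),  # base picture
        "mask": Image.open("mask.png"),    # white pixels mark the region to repaint
    }
    outputs = StableDiffusionInpaintGenerator().generate_image(
        pil_image=pil_image,
        model_path="stabilityai/stable-diffusion-2-inpainting",
        prompt="a red leather sofa",
        negative_prompt="blurry, low quality",
        num_images_per_prompt=1,
        guidance_scale=7.5,
        num_inference_step=50,
    )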
diffusion_webui/diffusion_models/stable_diffusion/text2img_app.py ADDED
@@ -0,0 +1,167 @@
+ import gradio as gr
+ import torch
+ from diffusers import StableDiffusionPipeline
+
+ from diffusion_webui.utils.model_list import stable_model_list
+ from diffusion_webui.utils.scheduler_list import (
+     SCHEDULER_LIST,
+     get_scheduler_list,
+ )
+
+
+ class StableDiffusionText2ImageGenerator:
+     def __init__(self):
+         self.pipe = None
+
+     def load_model(
+         self,
+         model_path,
+         scheduler,
+     ):
+         if self.pipe is None:
+             self.pipe = StableDiffusionPipeline.from_pretrained(
+                 model_path, safety_checker=None, torch_dtype=torch.float16
+             )
+
+         self.pipe = get_scheduler_list(pipe=self.pipe, scheduler=scheduler)
+         self.pipe.to("cuda")
+         self.pipe.enable_xformers_memory_efficient_attention()
+
+         return self.pipe
+
+     def generate_image(
+         self,
+         model_path: str,
+         prompt: str,
+         negative_prompt: str,
+         num_images_per_prompt: int,
+         scheduler: str,
+         guidance_scale: float,
+         num_inference_step: int,
+         height: int,
+         width: int,
+         seed_generator=0,
+     ):
+         pipe = self.load_model(
+             model_path=model_path,
+             scheduler=scheduler,
+         )
+         if seed_generator == 0:
+             random_seed = torch.randint(0, 1000000, (1,))
+             generator = torch.manual_seed(random_seed)
+         else:
+             generator = torch.manual_seed(seed_generator)
+
+         images = pipe(
+             prompt=prompt,
+             height=height,
+             width=width,
+             negative_prompt=negative_prompt,
+             num_images_per_prompt=num_images_per_prompt,
+             num_inference_steps=num_inference_step,
+             guidance_scale=guidance_scale,
+             generator=generator,
+         ).images
+
+         return images
+
+     def app():
+         with gr.Blocks():
+             with gr.Row():
+                 with gr.Column():
+                     text2image_prompt = gr.Textbox(
+                         lines=1,
+                         placeholder="Prompt",
+                         show_label=False,
+                     )
+
+                     text2image_negative_prompt = gr.Textbox(
+                         lines=1,
+                         placeholder="Negative Prompt",
+                         show_label=False,
+                     )
+                     with gr.Row():
+                         with gr.Column():
+                             text2image_model_path = gr.Dropdown(
+                                 choices=stable_model_list,
+                                 value=stable_model_list[0],
+                                 label="Text-Image Model Id",
+                             )
+
+                             text2image_guidance_scale = gr.Slider(
+                                 minimum=0.1,
+                                 maximum=15,
+                                 step=0.1,
+                                 value=7.5,
+                                 label="Guidance Scale",
+                             )
+
+                             text2image_num_inference_step = gr.Slider(
+                                 minimum=1,
+                                 maximum=100,
+                                 step=1,
+                                 value=50,
+                                 label="Num Inference Step",
+                             )
+                             text2image_num_images_per_prompt = gr.Slider(
+                                 minimum=1,
+                                 maximum=30,
+                                 step=1,
+                                 value=1,
+                                 label="Number Of Images",
+                             )
+                         with gr.Row():
+                             with gr.Column():
+                                 text2image_scheduler = gr.Dropdown(
+                                     choices=SCHEDULER_LIST,
+                                     value=SCHEDULER_LIST[0],
+                                     label="Scheduler",
+                                 )
+
+                                 text2image_height = gr.Slider(
+                                     minimum=128,
+                                     maximum=1280,
+                                     step=32,
+                                     value=512,
+                                     label="Image Height",
+                                 )
+
+                                 text2image_width = gr.Slider(
+                                     minimum=128,
+                                     maximum=1280,
+                                     step=32,
+                                     value=512,
+                                     label="Image Width",
+                                 )
+                                 text2image_seed_generator = gr.Slider(
+                                     label="Seed(0 for random)",
+                                     minimum=0,
+                                     maximum=1000000,
+                                     value=0,
+                                 )
+                     text2image_predict = gr.Button(value="Generator")
+
+                 with gr.Column():
+                     output_image = gr.Gallery(
+                         label="Generated images",
+                         show_label=False,
+                         elem_id="gallery",
+                     ).style(grid=(1, 2), height=200)
+
+             text2image_predict.click(
+                 fn=StableDiffusionText2ImageGenerator().generate_image,
+                 inputs=[
+                     text2image_model_path,
+                     text2image_prompt,
+                     text2image_negative_prompt,
+                     text2image_num_images_per_prompt,
+                     text2image_scheduler,
+                     text2image_guidance_scale,
+                     text2image_num_inference_step,
+                     text2image_height,
+                     text2image_width,
+                     text2image_seed_generator,
+                 ],
+                 outputs=output_image,
+             )
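A minimal sketch of calling the generator outside the Gradio UI (assumes a CUDA GPU, since load_model moves the pipeline to "cuda"; the prompt and output filename are placeholders, and the model id and scheduler name come from model_list.py and scheduler_list.py below):

    images = StableDiffusionText2ImageGenerator().generate_image(
        model_path="runwayml/stable-diffusion-v1-5",
        prompt="a watercolor painting of a lighthouse at dusk",
        negative_prompt="low quality, blurry",
        num_images_per_prompt=1,
        scheduler="DDIM",
        guidance_scale=7.5,
        num_inference_step=50,
        height=512,
        width=512,
        seed_generator=42,
    )
    images[0].save("lighthouse.png")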
diffusion_webui/helpers.py ADDED
@@ -0,0 +1,54 @@
+ from diffusion_webui.diffusion_models.controlnet.controlnet_canny import (
+     StableDiffusionControlNetCannyGenerator,
+ )
+ from diffusion_webui.diffusion_models.controlnet.controlnet_depth import (
+     StableDiffusionControlNetDepthGenerator,
+ )
+ from diffusion_webui.diffusion_models.controlnet.controlnet_hed import (
+     StableDiffusionControlNetHEDGenerator,
+ )
+ from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_canny import (
+     StableDiffusionControlNetInpaintCannyGenerator,
+ )
+ from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_depth import (
+     StableDiffusionControlInpaintNetDepthGenerator,
+ )
+ from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_hed import (
+     StableDiffusionControlNetInpaintHedGenerator,
+ )
+ from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_mlsd import (
+     StableDiffusionControlNetInpaintMlsdGenerator,
+ )
+ from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_pose import (
+     StableDiffusionControlNetInpaintPoseGenerator,
+ )
+ from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_scribble import (
+     StableDiffusionControlNetInpaintScribbleGenerator,
+ )
+ from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.controlnet_inpaint_seg import (
+     StableDiffusionControlNetInpaintSegGenerator,
+ )
+ from diffusion_webui.diffusion_models.controlnet.controlnet_mlsd import (
+     StableDiffusionControlNetMLSDGenerator,
+ )
+ from diffusion_webui.diffusion_models.controlnet.controlnet_pose import (
+     StableDiffusionControlNetPoseGenerator,
+ )
+ from diffusion_webui.diffusion_models.controlnet.controlnet_scribble import (
+     StableDiffusionControlNetScribbleGenerator,
+ )
+ from diffusion_webui.diffusion_models.controlnet.controlnet_seg import (
+     StableDiffusionControlNetSegGenerator,
+ )
+ from diffusion_webui.diffusion_models.stable_diffusion.img2img_app import (
+     StableDiffusionImage2ImageGenerator,
+ )
+ from diffusion_webui.diffusion_models.stable_diffusion.inpaint_app import (
+     StableDiffusionInpaintGenerator,
+ )
+ from diffusion_webui.diffusion_models.stable_diffusion.text2img_app import (
+     StableDiffusionText2ImageGenerator,
+ )
+ from diffusion_webui.upscaler_models.codeformer_upscaler import (
+     CodeformerUpscalerGenerator,
+ )
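helpers.py only re-exports the generator classes; a top-level script would presumably wire their app() builders into tabs. A hypothetical wiring sketch, for orientation only (tab names and the selection of generators are illustrative, not taken from this commit):

    import gradio as gr
    from diffusion_webui.helpers import (
        StableDiffusionControlNetPoseGenerator,
        StableDiffusionText2ImageGenerator,
    )

    with gr.Blocks() as demo:
        with gr.Tab("Text2Img"):
            StableDiffusionText2ImageGenerator.app()
        with gr.Tab("ControlNet Pose"):
            StableDiffusionControlNetPoseGenerator.app()

    demo.launch()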
diffusion_webui/upscaler_models/codeformer_upscaler.py ADDED
@@ -0,0 +1,81 @@
+ import gradio as gr
+ from codeformer.app import inference_app
+
+
+ class CodeformerUpscalerGenerator:
+     def generate_image(
+         self,
+         image_path: str,
+         background_enhance: bool,
+         face_upsample: bool,
+         upscale: int,
+         codeformer_fidelity: float,
+     ):
+         pipe = inference_app(
+             image=image_path,
+             background_enhance=background_enhance,
+             face_upsample=face_upsample,
+             upscale=upscale,
+             codeformer_fidelity=codeformer_fidelity,
+         )
+
+         return [pipe]
+
+     def app():
+         with gr.Blocks():
+             with gr.Row():
+                 with gr.Column():
+                     codeformer_upscale_image_file = gr.Image(
+                         type="filepath", label="Image"
+                     ).style(height=260)
+
+                     with gr.Row():
+                         with gr.Column():
+                             codeformer_face_upsample = gr.Checkbox(
+                                 label="Face Upsample",
+                                 value=True,
+                             )
+                             codeformer_upscale = gr.Slider(
+                                 label="Upscale",
+                                 minimum=1,
+                                 maximum=4,
+                                 step=1,
+                                 value=2,
+                             )
+                         with gr.Row():
+                             with gr.Column():
+                                 codeformer_background_enhance = gr.Checkbox(
+                                     label="Background Enhance",
+                                     value=True,
+                                 )
+                                 codeformer_upscale_fidelity = gr.Slider(
+                                     label="Codeformer Fidelity",
+                                     minimum=0.1,
+                                     maximum=1.0,
+                                     step=0.1,
+                                     value=0.5,
+                                 )
+
+                     codeformer_upscale_predict_button = gr.Button(
+                         value="Generator"
+                     )
+
+                 with gr.Column():
+                     output_image = gr.Gallery(
+                         label="Generated images",
+                         show_label=False,
+                         elem_id="gallery",
+                     ).style(grid=(1, 2))
+
+             codeformer_upscale_predict_button.click(
+                 fn=CodeformerUpscalerGenerator().generate_image,
+                 inputs=[
+                     codeformer_upscale_image_file,
+                     codeformer_background_enhance,
+                     codeformer_face_upsample,
+                     codeformer_upscale,
+                     codeformer_upscale_fidelity,
+                 ],
+                 outputs=[output_image],
+             )
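The upscaler can also be called directly; inference_app comes from the codeformer-pip package pinned in requirements.txt. An illustrative call with a placeholder image path:

    upscaled = CodeformerUpscalerGenerator().generate_image(
        image_path="portrait.png",
        background_enhance=True,
        face_upsample=True,
        upscale=2,
        codeformer_fidelity=0.5,
    )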
diffusion_webui/utils/__init__.py ADDED
File without changes
diffusion_webui/utils/data_utils.py ADDED
@@ -0,0 +1,12 @@
+ from PIL import Image
+
+
+ def image_grid(imgs, rows, cols):
+     assert len(imgs) == rows * cols
+
+     w, h = imgs[0].size
+     grid = Image.new("RGB", size=(cols * w, rows * h))
+
+     for i, img in enumerate(imgs):
+         grid.paste(img, box=(i % cols * w, i // cols * h))
+     return grid
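For instance, four outputs from any of the generators above can be tiled into a 2x2 contact sheet; the image count must equal rows * cols:

    # images: a list of four PIL images returned by a generate_image() call
    grid = image_grid(images, rows=2, cols=2)
    grid.save("grid.png")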
diffusion_webui/utils/model_list.py ADDED
@@ -0,0 +1,48 @@
+ stable_model_list = [
+     "runwayml/stable-diffusion-v1-5",
+     "stabilityai/stable-diffusion-2-1",
+     "prompthero/openjourney-v4",
+     "wavymulder/Analog-Diffusion",
+     "dreamlike-art/dreamlike-diffusion-1.0",
+     "gsdf/Counterfeit-V2.5",
+     "dreamlike-art/dreamlike-photoreal-2.0",
+ ]
+
+ controlnet_canny_model_list = [
+     "lllyasviel/sd-controlnet-canny",
+     "thibaud/controlnet-sd21-canny-diffusers",
+ ]
+
+ controlnet_depth_model_list = [
+     "lllyasviel/sd-controlnet-depth",
+     "thibaud/controlnet-sd21-depth-diffusers",
+ ]
+
+ controlnet_pose_model_list = [
+     "lllyasviel/sd-controlnet-openpose",
+     "thibaud/controlnet-sd21-openpose-diffusers",
+ ]
+
+ controlnet_hed_model_list = [
+     "lllyasviel/sd-controlnet-hed",
+     "thibaud/controlnet-sd21-hed-diffusers",
+ ]
+
+ controlnet_scribble_model_list = [
+     "lllyasviel/sd-controlnet-scribble",
+     "thibaud/controlnet-sd21-scribble-diffusers",
+ ]
+
+ stable_inpiant_model_list = [
+     "stabilityai/stable-diffusion-2-inpainting",
+     "runwayml/stable-diffusion-inpainting",
+ ]
+
+ controlnet_mlsd_model_list = [
+     "lllyasviel/sd-controlnet-mlsd",
+ ]
+
+ controlnet_seg_model_list = [
+     "lllyasviel/sd-controlnet-seg",
+ ]
diffusion_webui/utils/scheduler_list.py ADDED
@@ -0,0 +1,47 @@
+ from diffusers import (
+     DDIMScheduler,
+     EulerAncestralDiscreteScheduler,
+     EulerDiscreteScheduler,
+     HeunDiscreteScheduler,
+     LMSDiscreteScheduler,
+     UniPCMultistepScheduler,
+ )
+
+ SCHEDULER_LIST = [
+     "DDIM",
+     "EulerA",
+     "Euler",
+     "LMS",
+     "Heun",
+     "UniPC",
+ ]
+
+
+ def get_scheduler_list(pipe, scheduler):
+     if scheduler == SCHEDULER_LIST[0]:
+         pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
+
+     elif scheduler == SCHEDULER_LIST[1]:
+         pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(
+             pipe.scheduler.config
+         )
+
+     elif scheduler == SCHEDULER_LIST[2]:
+         pipe.scheduler = EulerDiscreteScheduler.from_config(
+             pipe.scheduler.config
+         )
+
+     elif scheduler == SCHEDULER_LIST[3]:
+         pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
+
+     elif scheduler == SCHEDULER_LIST[4]:
+         pipe.scheduler = HeunDiscreteScheduler.from_config(
+             pipe.scheduler.config
+         )
+
+     elif scheduler == SCHEDULER_LIST[5]:
+         pipe.scheduler = UniPCMultistepScheduler.from_config(
+             pipe.scheduler.config
+         )
+
+     return pipe
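Usage sketch: the helper rebuilds the scheduler from the pipeline's existing config based on the dropdown string, e.g. switching a freshly loaded pipeline to UniPC (the model id is one entry from stable_model_list above):

    import torch
    from diffusers import StableDiffusionPipeline

    pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
    )
    pipe = get_scheduler_list(pipe=pipe, scheduler="UniPC")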
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ transformers
+ bitsandbytes==0.35.0
+ xformers
+ controlnet_aux
+ diffusers
+ imageio
+ gradio
+ triton
+ codeformer-pip