Files changed (1)
app.py +373 -369
app.py CHANGED
@@ -1,369 +1,373 @@
-import spaces
-import gradio as gr
-from gradio_imageslider import ImageSlider
-import torch
-
-torch.jit.script = lambda f: f
-from hidiffusion import apply_hidiffusion
-from diffusers import (
-    ControlNetModel,
-    StableDiffusionXLControlNetImg2ImgPipeline,
-    DDIMScheduler,
-)
-from controlnet_aux import AnylineDetector
-from compel import Compel, ReturnedEmbeddingsType
-from PIL import Image
-import os
-import time
-import numpy as np
-
-IS_SPACES_ZERO = os.environ.get("SPACES_ZERO_GPU", "0") == "1"
-IS_SPACE = os.environ.get("SPACE_ID", None) is not None
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
-dtype = torch.float16
-
-LOW_MEMORY = os.getenv("LOW_MEMORY", "0") == "1"
-
-print(f"device: {device}")
-print(f"dtype: {dtype}")
-print(f"low memory: {LOW_MEMORY}")
-
-
-model = "stabilityai/stable-diffusion-xl-base-1.0"
-# model = "stabilityai/sdxl-turbo"
-# vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=dtype)
-scheduler = DDIMScheduler.from_pretrained(model, subfolder="scheduler")
-# controlnet = ControlNetModel.from_pretrained(
-#     "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
-# )
-controlnet = ControlNetModel.from_pretrained(
-    "TheMistoAI/MistoLine",
-    torch_dtype=torch.float16,
-    revision="refs/pr/3",
-    variant="fp16",
-)
-pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(
-    model,
-    controlnet=controlnet,
-    torch_dtype=dtype,
-    variant="fp16",
-    use_safetensors=True,
-    scheduler=scheduler,
-)
-
-compel = Compel(
-    tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
-    text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
-    returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
-    requires_pooled=[False, True],
-)
-pipe = pipe.to(device)
-
-if not IS_SPACES_ZERO:
-    apply_hidiffusion(pipe)
-    # pipe.enable_xformers_memory_efficient_attention()
-    pipe.enable_model_cpu_offload()
-    pipe.enable_vae_tiling()
-
-anyline = AnylineDetector.from_pretrained(
-    "TheMistoAI/MistoLine", filename="MTEED.pth", subfolder="Anyline"
-).to(device)
-
-
-def pad_image(image):
-    w, h = image.size
-    if w == h:
-        return image
-    elif w > h:
-        new_image = Image.new(image.mode, (w, w), (0, 0, 0))
-        pad_w = 0
-        pad_h = (w - h) // 2
-        new_image.paste(image, (0, pad_h))
-        return new_image
-    else:
-        new_image = Image.new(image.mode, (h, h), (0, 0, 0))
-        pad_w = (h - w) // 2
-        pad_h = 0
-        new_image.paste(image, (pad_w, 0))
-        return new_image
-
-
-@spaces.GPU(duration=120)
-def predict(
-    input_image,
-    prompt,
-    negative_prompt,
-    seed,
-    guidance_scale=8.5,
-    scale=2,
-    controlnet_conditioning_scale=0.5,
-    strength=1.0,
-    controlnet_start=0.0,
-    controlnet_end=1.0,
-    guassian_sigma=2.0,
-    intensity_threshold=3,
-    progress=gr.Progress(track_tqdm=True),
-):
-    if IS_SPACES_ZERO:
-        apply_hidiffusion(pipe)
-    if input_image is None:
-        raise gr.Error("Please upload an image.")
-    padded_image = pad_image(input_image).resize((1024, 1024)).convert("RGB")
-    conditioning, pooled = compel([prompt, negative_prompt])
-    generator = torch.manual_seed(seed)
-    last_time = time.time()
-    anyline_image = anyline(
-        padded_image,
-        detect_resolution=1280,
-        guassian_sigma=max(0.01, guassian_sigma),
-        intensity_threshold=intensity_threshold,
-    )
-
-    images = pipe(
-        image=padded_image,
-        control_image=anyline_image,
-        strength=strength,
-        prompt_embeds=conditioning[0:1],
-        pooled_prompt_embeds=pooled[0:1],
-        negative_prompt_embeds=conditioning[1:2],
-        negative_pooled_prompt_embeds=pooled[1:2],
-        width=1024 * scale,
-        height=1024 * scale,
-        controlnet_conditioning_scale=float(controlnet_conditioning_scale),
-        controlnet_start=float(controlnet_start),
-        controlnet_end=float(controlnet_end),
-        generator=generator,
-        num_inference_steps=30,
-        guidance_scale=guidance_scale,
-        eta=1.0,
-    )
-    print(f"Time taken: {time.time() - last_time}")
-    return (padded_image, images.images[0]), padded_image, anyline_image
-
-
-css = """
-#intro{
-    # max-width: 32rem;
-    # text-align: center;
-    # margin: 0 auto;
-}
-"""
-
-with gr.Blocks(css=css) as demo:
-    gr.Markdown(
-        """
-        # Enhance This
-        ### HiDiffusion SDXL
-
-        [HiDiffusion](https://github.com/megvii-research/HiDiffusion) enables higher-resolution image generation.
-        You can upload an initial image and prompt to generate an enhanced version.
-        SDXL Controlnet [TheMistoAI/MistoLine](https://huggingface.co/TheMistoAI/MistoLine)
-        [Duplicate Space](https://huggingface.co/spaces/radames/Enhance-This-HiDiffusion-SDXL?duplicate=true) to avoid the queue.
-
-        <small>
-        <b>Notes</b> The author advises against the term "super resolution" because it's more like image-to-image generation than enhancement, but it's still a lot of fun!
-
-        </small>
-        """,
-        elem_id="intro",
-    )
-    with gr.Row():
-        with gr.Column(scale=1):
-            image_input = gr.Image(type="pil", label="Input Image")
-            prompt = gr.Textbox(
-                label="Prompt",
-                info="The prompt is very important to get the desired results. Please try to describe the image as best as you can. Accepts Compel Syntax",
-            )
-            negative_prompt = gr.Textbox(
-                label="Negative Prompt",
-                value="blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
-            )
-            seed = gr.Slider(
-                minimum=0,
-                maximum=2**64 - 1,
-                value=1415926535897932,
-                step=1,
-                label="Seed",
-                randomize=True,
-            )
-            with gr.Accordion(label="Advanced", open=False):
-                guidance_scale = gr.Slider(
-                    minimum=0,
-                    maximum=50,
-                    value=8.5,
-                    step=0.001,
-                    label="Guidance Scale",
-                )
-                scale = gr.Slider(
-                    minimum=1,
-                    maximum=5,
-                    value=2,
-                    step=1,
-                    label="Magnification Scale",
-                    interactive=not IS_SPACE,
-                )
-                controlnet_conditioning_scale = gr.Slider(
-                    minimum=0,
-                    maximum=1,
-                    step=0.001,
-                    value=0.5,
-                    label="ControlNet Conditioning Scale",
-                )
-                strength = gr.Slider(
-                    minimum=0,
-                    maximum=1,
-                    step=0.001,
-                    value=1,
-                    label="Strength",
-                )
-                controlnet_start = gr.Slider(
-                    minimum=0,
-                    maximum=1,
-                    step=0.001,
-                    value=0.0,
-                    label="ControlNet Start",
-                )
-                controlnet_end = gr.Slider(
-                    minimum=0.0,
-                    maximum=1.0,
-                    step=0.001,
-                    value=1.0,
-                    label="ControlNet End",
-                )
-                guassian_sigma = gr.Slider(
-                    minimum=0.01,
-                    maximum=10.0,
-                    step=0.1,
-                    value=2.0,
-                    label="(Anyline) Guassian Sigma",
-                )
-                intensity_threshold = gr.Slider(
-                    minimum=0,
-                    maximum=255,
-                    step=1,
-                    value=3,
-                    label="(Anyline) Intensity Threshold",
-                )
-
-            btn = gr.Button()
-        with gr.Column(scale=2):
-            with gr.Group():
-                image_slider = ImageSlider(position=0.5)
-            with gr.Row():
-                padded_image = gr.Image(type="pil", label="Padded Image")
-                anyline_image = gr.Image(type="pil", label="Anyline Image")
-    inputs = [
-        image_input,
-        prompt,
-        negative_prompt,
-        seed,
-        guidance_scale,
-        scale,
-        controlnet_conditioning_scale,
-        strength,
-        controlnet_start,
-        controlnet_end,
-        guassian_sigma,
-        intensity_threshold,
-    ]
-    outputs = [image_slider, padded_image, anyline_image]
-    btn.click(lambda x: None, inputs=None, outputs=image_slider).then(
-        fn=predict, inputs=inputs, outputs=outputs
-    )
-    gr.Examples(
-        fn=predict,
-        inputs=inputs,
-        outputs=outputs,
-        examples=[
-            [
-                "./examples/lara.jpeg",
-                "photography of lara croft 8k high definition award winning",
-                "blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
-                5436236241,
-                8.5,
-                2,
-                0.8,
-                1.0,
-                0.0,
-                0.9,
-                2,
-                3,
-            ],
-            [
-                "./examples/cybetruck.jpeg",
-                "photo of tesla cybertruck futuristic car 8k high definition on a sand dune in mars, future",
-                "blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
-                383472451451,
-                8.5,
-                2,
-                0.8,
-                0.8,
-                0.0,
-                0.9,
-                2,
-                3,
-            ],
-            [
-                "./examples/jesus.png",
-                "a photorealistic painting of Jesus Christ, 4k high definition",
-                "blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
-                13317204146129588000,
-                8.5,
-                2,
-                0.8,
-                0.8,
-                0.0,
-                0.9,
-                2,
-                3,
-            ],
-            [
-                "./examples/anna-sullivan-DioLM8ViiO8-unsplash.jpg",
-                "A crowded stadium with enthusiastic fans watching a daytime sporting event, the stands filled with colorful attire and the sun casting a warm glow",
-                "blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
-                5623124123512,
-                8.5,
-                2,
-                0.8,
-                0.8,
-                0.0,
-                0.9,
-                2,
-                3,
-            ],
-            [
-                "./examples/img_aef651cb-2919-499d-aa49-6d4e2e21a56e_1024.jpg",
-                "a large red flower on a black background 4k high definition",
-                "blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
-                23123412341234,
-                8.5,
-                2,
-                0.8,
-                0.8,
-                0.0,
-                0.9,
-                2,
-                3,
-            ],
-            [
-                "./examples/huggingface.jpg",
-                "photo realistic huggingface human emoji costume, round, yellow, (human skin)+++ (human texture)+++",
-                "blurry, ugly, duplicate, poorly drawn, deformed, mosaic, emoji cartoon, drawing, pixelated",
-                12312353423,
-                15.206,
-                2,
-                0.364,
-                0.8,
-                0.0,
-                0.9,
-                2,
-                3,
-            ],
-        ],
-        cache_examples="lazy",
-    )
-
-
-demo.queue(api_open=False)
-demo.launch(show_api=False)
+import spaces
+import gradio as gr
+from gradio_imageslider import ImageSlider
+import torch
+
+torch.jit.script = lambda f: f
+from hidiffusion import apply_hidiffusion
+from diffusers import (
+    ControlNetModel,
+    StableDiffusionXLControlNetImg2ImgPipeline,
+    DDIMScheduler,
+)
+from controlnet_aux import AnylineDetector
+from compel import Compel, ReturnedEmbeddingsType
+from PIL import Image
+import os
+import time
+import numpy as np
+
+IS_SPACES_ZERO = os.environ.get("SPACES_ZERO_GPU", "0") == "1"
+IS_SPACE = os.environ.get("SPACE_ID", None) is not None
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+dtype = torch.float16
+
+LOW_MEMORY = os.getenv("LOW_MEMORY", "0") == "1"
+
+print(f"device: {device}")
+print(f"dtype: {dtype}")
+print(f"low memory: {LOW_MEMORY}")
+
+
+model = "stabilityai/stable-diffusion-xl-base-1.0"
+# model = "stabilityai/sdxl-turbo"
+# vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=dtype)
+scheduler = DDIMScheduler.from_pretrained(model, subfolder="scheduler")
+# controlnet = ControlNetModel.from_pretrained(
+#     "diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16
+# )
+controlnet = ControlNetModel.from_pretrained(
+    "TheMistoAI/MistoLine",
+    torch_dtype=torch.float16,
+    revision="refs/pr/3",
+    variant="fp16",
+)
+pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(
+    model,
+    controlnet=controlnet,
+    torch_dtype=dtype,
+    variant="fp16",
+    use_safetensors=True,
+    scheduler=scheduler,
+)
+
+compel = Compel(
+    tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
+    text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
+    returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
+    requires_pooled=[False, True],
+)
+#pipe = pipe.to(device)
+
+if not IS_SPACES_ZERO:
+    apply_hidiffusion(pipe)
+    # pipe.enable_xformers_memory_efficient_attention()
+    pipe.enable_model_cpu_offload()
+    pipe.enable_vae_tiling()
+
+anyline = AnylineDetector.from_pretrained(
+    "TheMistoAI/MistoLine", filename="MTEED.pth", subfolder="Anyline"
+).to("cpu")  # .to(device)
+
+
+def pad_image(image):
+    w, h = image.size
+    if w == h:
+        return image
+    elif w > h:
+        new_image = Image.new(image.mode, (w, w), (0, 0, 0))
+        pad_w = 0
+        pad_h = (w - h) // 2
+        new_image.paste(image, (0, pad_h))
+        return new_image
+    else:
+        new_image = Image.new(image.mode, (h, h), (0, 0, 0))
+        pad_w = (h - w) // 2
+        pad_h = 0
+        new_image.paste(image, (pad_w, 0))
+        return new_image
+
+
+@spaces.GPU(duration=120)
+def predict(
+    input_image,
+    prompt,
+    negative_prompt,
+    seed,
+    guidance_scale=8.5,
+    scale=2,
+    controlnet_conditioning_scale=0.5,
+    strength=1.0,
+    controlnet_start=0.0,
+    controlnet_end=1.0,
+    guassian_sigma=2.0,
+    intensity_threshold=3,
+    progress=gr.Progress(track_tqdm=True),
+):
+    global pipe
+    global anyline
+    pipe = pipe.to(device)
+    anyline.to(device)
+    if IS_SPACES_ZERO:
+        apply_hidiffusion(pipe)
+    if input_image is None:
+        raise gr.Error("Please upload an image.")
+    padded_image = pad_image(input_image).resize((1024, 1024)).convert("RGB")
+    conditioning, pooled = compel([prompt, negative_prompt])
+    generator = torch.manual_seed(seed)
+    last_time = time.time()
+    anyline_image = anyline(
+        padded_image,
+        detect_resolution=1280,
+        guassian_sigma=max(0.01, guassian_sigma),
+        intensity_threshold=intensity_threshold,
+    )
+
+    images = pipe(
+        image=padded_image,
+        control_image=anyline_image,
+        strength=strength,
+        prompt_embeds=conditioning[0:1],
+        pooled_prompt_embeds=pooled[0:1],
+        negative_prompt_embeds=conditioning[1:2],
+        negative_pooled_prompt_embeds=pooled[1:2],
+        width=1024 * scale,
+        height=1024 * scale,
+        controlnet_conditioning_scale=float(controlnet_conditioning_scale),
+        controlnet_start=float(controlnet_start),
+        controlnet_end=float(controlnet_end),
+        generator=generator,
+        num_inference_steps=30,
+        guidance_scale=guidance_scale,
+        eta=1.0,
+    )
+    print(f"Time taken: {time.time() - last_time}")
+    return (padded_image, images.images[0]), padded_image, anyline_image
+
+
+css = """
+#intro{
+    # max-width: 32rem;
+    # text-align: center;
+    # margin: 0 auto;
+}
+"""
+
+with gr.Blocks(css=css) as demo:
+    gr.Markdown(
+        """
+        # Enhance This
+        ### HiDiffusion SDXL
+
+        [HiDiffusion](https://github.com/megvii-research/HiDiffusion) enables higher-resolution image generation.
+        You can upload an initial image and prompt to generate an enhanced version.
+        SDXL Controlnet [TheMistoAI/MistoLine](https://huggingface.co/TheMistoAI/MistoLine)
+        [Duplicate Space](https://huggingface.co/spaces/radames/Enhance-This-HiDiffusion-SDXL?duplicate=true) to avoid the queue.
+
+        <small>
+        <b>Notes</b> The author advises against the term "super resolution" because it's more like image-to-image generation than enhancement, but it's still a lot of fun!
+
+        </small>
+        """,
+        elem_id="intro",
+    )
+    with gr.Row():
+        with gr.Column(scale=1):
+            image_input = gr.Image(type="pil", label="Input Image")
+            prompt = gr.Textbox(
+                label="Prompt",
+                info="The prompt is very important to get the desired results. Please try to describe the image as best as you can. Accepts Compel Syntax",
+            )
+            negative_prompt = gr.Textbox(
+                label="Negative Prompt",
+                value="blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
+            )
+            seed = gr.Slider(
+                minimum=0,
+                maximum=2**64 - 1,
+                value=1415926535897932,
+                step=1,
+                label="Seed",
+                randomize=True,
+            )
+            with gr.Accordion(label="Advanced", open=False):
+                guidance_scale = gr.Slider(
+                    minimum=0,
+                    maximum=50,
+                    value=8.5,
+                    step=0.001,
+                    label="Guidance Scale",
+                )
+                scale = gr.Slider(
+                    minimum=1,
+                    maximum=5,
+                    value=2,
+                    step=1,
+                    label="Magnification Scale",
+                    interactive=not IS_SPACE,
+                )
+                controlnet_conditioning_scale = gr.Slider(
+                    minimum=0,
+                    maximum=1,
+                    step=0.001,
+                    value=0.5,
+                    label="ControlNet Conditioning Scale",
+                )
+                strength = gr.Slider(
+                    minimum=0,
+                    maximum=1,
+                    step=0.001,
+                    value=1,
+                    label="Strength",
+                )
+                controlnet_start = gr.Slider(
+                    minimum=0,
+                    maximum=1,
+                    step=0.001,
+                    value=0.0,
+                    label="ControlNet Start",
+                )
+                controlnet_end = gr.Slider(
+                    minimum=0.0,
+                    maximum=1.0,
+                    step=0.001,
+                    value=1.0,
+                    label="ControlNet End",
+                )
+                guassian_sigma = gr.Slider(
+                    minimum=0.01,
+                    maximum=10.0,
+                    step=0.1,
+                    value=2.0,
+                    label="(Anyline) Guassian Sigma",
+                )
+                intensity_threshold = gr.Slider(
+                    minimum=0,
+                    maximum=255,
+                    step=1,
+                    value=3,
+                    label="(Anyline) Intensity Threshold",
+                )
+
+            btn = gr.Button()
+        with gr.Column(scale=2):
+            with gr.Group():
+                image_slider = ImageSlider(position=0.5)
+            with gr.Row():
+                padded_image = gr.Image(type="pil", label="Padded Image")
+                anyline_image = gr.Image(type="pil", label="Anyline Image")
+    inputs = [
+        image_input,
+        prompt,
+        negative_prompt,
+        seed,
+        guidance_scale,
+        scale,
+        controlnet_conditioning_scale,
+        strength,
+        controlnet_start,
+        controlnet_end,
+        guassian_sigma,
+        intensity_threshold,
+    ]
+    outputs = [image_slider, padded_image, anyline_image]
+    btn.click(lambda x: None, inputs=None, outputs=image_slider).then(
+        fn=predict, inputs=inputs, outputs=outputs
+    )
+    gr.Examples(
+        fn=predict,
+        inputs=inputs,
+        outputs=outputs,
+        examples=[
+            [
+                "./examples/lara.jpeg",
+                "photography of lara croft 8k high definition award winning",
+                "blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
+                5436236241,
+                8.5,
+                2,
+                0.8,
+                1.0,
+                0.0,
+                0.9,
+                2,
+                3,
+            ],
+            [
+                "./examples/cybetruck.jpeg",
+                "photo of tesla cybertruck futuristic car 8k high definition on a sand dune in mars, future",
+                "blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
+                383472451451,
+                8.5,
+                2,
+                0.8,
+                0.8,
+                0.0,
+                0.9,
+                2,
+                3,
+            ],
+            [
+                "./examples/jesus.png",
+                "a photorealistic painting of Jesus Christ, 4k high definition",
+                "blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
+                13317204146129588000,
+                8.5,
+                2,
+                0.8,
+                0.8,
+                0.0,
+                0.9,
+                2,
+                3,
+            ],
+            [
+                "./examples/anna-sullivan-DioLM8ViiO8-unsplash.jpg",
+                "A crowded stadium with enthusiastic fans watching a daytime sporting event, the stands filled with colorful attire and the sun casting a warm glow",
+                "blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
+                5623124123512,
+                8.5,
+                2,
+                0.8,
+                0.8,
+                0.0,
+                0.9,
+                2,
+                3,
+            ],
+            [
+                "./examples/img_aef651cb-2919-499d-aa49-6d4e2e21a56e_1024.jpg",
+                "a large red flower on a black background 4k high definition",
+                "blurry, ugly, duplicate, poorly drawn, deformed, mosaic",
+                23123412341234,
+                8.5,
+                2,
+                0.8,
+                0.8,
+                0.0,
+                0.9,
+                2,
+                3,
+            ],
+            [
+                "./examples/huggingface.jpg",
+                "photo realistic huggingface human emoji costume, round, yellow, (human skin)+++ (human texture)+++",
+                "blurry, ugly, duplicate, poorly drawn, deformed, mosaic, emoji cartoon, drawing, pixelated",
+                12312353423,
+                15.206,
+                2,
+                0.364,
+                0.8,
+                0.0,
+                0.9,
+                2,
+                3,
+            ],
+        ],
+        cache_examples="lazy",
+    )
+
+
+demo.queue(api_open=False)
+demo.launch(show_api=False)
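
The net change in this diff is to defer GPU placement: the import-time `pipe = pipe.to(device)` is commented out, the Anyline detector is loaded on CPU, and both are moved to the GPU at the top of the `@spaces.GPU`-decorated `predict`, where ZeroGPU actually attaches a CUDA device. Below is a minimal sketch of that pattern in isolation, assuming the Hugging Face `spaces` package is installed (it is on Spaces hardware); `TinyModel` and `infer` are hypothetical stand-ins for the SDXL pipeline and `predict`, not part of the PR.

```python
import torch
import spaces  # Hugging Face `spaces` package; available on ZeroGPU Spaces


class TinyModel(torch.nn.Module):
    """Hypothetical stand-in for the SDXL pipeline, for illustration only."""

    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 4)

    def forward(self, x):
        return self.linear(x)


# At import time no GPU is attached on ZeroGPU, so keep weights on CPU.
model = TinyModel().to("cpu")


@spaces.GPU(duration=120)  # a CUDA device is attached only for this call
def infer(x: torch.Tensor) -> torch.Tensor:
    global model
    # Move lazily, now that a GPU may actually exist; falls back to CPU elsewhere.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = model.to(device)  # rebind the module-level name, as the PR does with `pipe`
    return model(x.to(device)).cpu()
```

The `global pipe` / `global anyline` statements are what make this work: `pipe = pipe.to(device)` assigns to the name `pipe`, and without `global` that assignment would make `pipe` function-local and raise `UnboundLocalError` when the right-hand side reads it.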