wanghaofan committed
Commit 05be4f7
Parent: d241563

Update app.py

Files changed (1): app.py (+21 -21)
app.py CHANGED
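Summary: the commit comments out the OpenPose ControlNet path end to end (import, preprocessor, model weights, UI slider, and strength argument), leaving canny and depth as the only selectable ControlNets. The hunks below show each removal.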
@@ -21,7 +21,7 @@ from insightface.app import FaceAnalysis
 from style_template import styles
 from pipeline_stable_diffusion_xl_instantid_full import StableDiffusionXLInstantIDPipeline, draw_kps
 
-from controlnet_aux import OpenposeDetector
+# from controlnet_aux import OpenposeDetector
 
 import gradio as gr
@@ -58,7 +58,7 @@ app = FaceAnalysis(
 )
 app.prepare(ctx_id=0, det_size=(640, 640))
 
-openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
+# openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
 
 depth_anything = DepthAnything.from_pretrained('LiheYoung/depth_anything_vitl14').to(device).eval()
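For context, a minimal sketch of the pose branch these two hunks disable, following the controlnet_aux usage in the removed lines (the input file name is illustrative):

```python
# Not part of this commit: how the disabled OpenPose preprocessor was used.
# OpenposeDetector ships with controlnet_aux; calling it on a PIL image
# returns a rendered skeleton map for conditioning an OpenPose ControlNet.
from controlnet_aux import OpenposeDetector
from PIL import Image

openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
pose_map = openpose(Image.open("reference.jpg"))  # "reference.jpg" is illustrative
```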
@@ -86,13 +86,13 @@ controlnet_identitynet = ControlNetModel.from_pretrained(
 )
 
 # controlnet-pose/canny/depth
-controlnet_pose_model = "thibaud/controlnet-openpose-sdxl-1.0"
+# controlnet_pose_model = "thibaud/controlnet-openpose-sdxl-1.0"
 controlnet_canny_model = "diffusers/controlnet-canny-sdxl-1.0"
 controlnet_depth_model = "diffusers/controlnet-depth-sdxl-1.0-small"
 
-controlnet_pose = ControlNetModel.from_pretrained(
-    controlnet_pose_model, torch_dtype=dtype
-).to(device)
+# controlnet_pose = ControlNetModel.from_pretrained(
+#     controlnet_pose_model, torch_dtype=dtype
+# ).to(device)
 controlnet_canny = ControlNetModel.from_pretrained(
     controlnet_canny_model, torch_dtype=dtype
 ).to(device)
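With the pose model dropped, only the canny and depth ControlNets are loaded. A sketch of how the remaining models can be stacked for inference, assuming the standard diffusers multi-ControlNet pattern (the exact wiring inside this app may differ):

```python
# Assumption: models are combined via diffusers' MultiControlNetModel,
# with the InstantID identity ControlNet kept first in the list.
from diffusers.pipelines.controlnet import MultiControlNetModel

multi_controlnet = MultiControlNetModel(
    [controlnet_identitynet, controlnet_canny, controlnet_depth]
)
```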
@@ -127,12 +127,12 @@ def get_canny_image(image, t1=100, t2=200):
     return Image.fromarray(edges, "L")
 
 controlnet_map = {
-    "pose": controlnet_pose,
+    #"pose": controlnet_pose,
     "canny": controlnet_canny,
     "depth": controlnet_depth,
 }
 controlnet_map_fn = {
-    "pose": openpose,
+    #"pose": openpose,
     "canny": get_canny_image,
     "depth": get_depth_map,
 }
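The two dicts are indexed in parallel by the UI's selection list, so "pose" has to be removed from both to stay consistent with the reduced checkbox choices. A sketch of that lookup pattern (variable names are illustrative):

```python
# Illustrative only: resolve the checked names to models and preprocessors.
selection = ["canny", "depth"]           # what the CheckboxGroup below returns
models = [controlnet_map[name] for name in selection]
control_images = [controlnet_map_fn[name](input_image) for name in selection]  # input_image: a PIL image
```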
@@ -230,10 +230,10 @@ def run_for_examples(face_file, pose_file, prompt, style, negative_prompt):
         20, # num_steps
         0.8, # identitynet_strength_ratio
         0.8, # adapter_strength_ratio
-        0.4, # pose_strength
+        #0.4, # pose_strength
         0.3, # canny_strength
         0.5, # depth_strength
-        ["pose", "canny"], # controlnet_selection
+        ["depth", "canny"], # controlnet_selection
         5.0, # guidance_scale
         42, # seed
         "EulerDiscreteScheduler", # scheduler
@@ -294,7 +294,7 @@ def generate_image(
     num_steps,
     identitynet_strength_ratio,
     adapter_strength_ratio,
-    pose_strength,
+    #pose_strength,
     canny_strength,
     depth_strength,
     controlnet_selection,
@@ -383,7 +383,7 @@ def generate_image(
 
     if len(controlnet_selection) > 0:
         controlnet_scales = {
-            "pose": pose_strength,
+            #"pose": pose_strength,
             "canny": canny_strength,
             "depth": depth_strength,
         }
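These per-model strengths presumably end up in the pipeline call as controlnet_conditioning_scale, ordered to match the control images. A hedged sketch of that convention (the identity-strength slot and the call itself are assumptions, not this commit's code):

```python
# Assumed wiring: identity strength first, then the selected ControlNet
# strengths in the same order as their control images.
control_scales = [float(identitynet_strength_ratio)] + [
    controlnet_scales[name] for name in controlnet_selection
]
images = pipe(
    prompt=prompt,
    negative_prompt=negative_prompt,
    image=control_images,  # identity keypoint map + selected control maps
    controlnet_conditioning_scale=control_scales,
).images
```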
@@ -524,16 +524,16 @@ with gr.Blocks(css=css) as demo:
             )
             with gr.Accordion("Controlnet"):
                 controlnet_selection = gr.CheckboxGroup(
-                    ["pose", "canny", "depth"], label="Controlnet", value=["pose"],
+                    ["canny", "depth"], label="Controlnet", value=["depth"],
                     info="Use pose for skeleton inference, canny for edge detection, and depth for depth map estimation. You can try all three to control the generation process"
                 )
-                pose_strength = gr.Slider(
-                    label="Pose strength",
-                    minimum=0,
-                    maximum=1.5,
-                    step=0.05,
-                    value=0.40,
-                )
+                # pose_strength = gr.Slider(
+                #     label="Pose strength",
+                #     minimum=0,
+                #     maximum=1.5,
+                #     step=0.05,
+                #     value=0.40,
+                # )
                 canny_strength = gr.Slider(
                     label="Canny strength",
                     minimum=0,
@@ -617,7 +617,7 @@ with gr.Blocks(css=css) as demo:
                 num_steps,
                 identitynet_strength_ratio,
                 adapter_strength_ratio,
-                pose_strength,
+                #pose_strength,
                 canny_strength,
                 depth_strength,
                 controlnet_selection,
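This final hunk removes the same argument from the Gradio event handler's inputs list; components are passed positionally, so the list must mirror generate_image's signature exactly. A sketch of the pattern with illustrative component names:

```python
# Illustrative: if pose_strength stayed in this list but not in
# generate_image's signature, every later argument would shift by one slot.
submit_btn.click(
    fn=generate_image,
    inputs=[
        face_file, pose_file, prompt, negative_prompt, style,
        num_steps, identitynet_strength_ratio, adapter_strength_ratio,
        canny_strength, depth_strength, controlnet_selection,
        guidance_scale, seed, scheduler,
    ],
    outputs=[gallery],
)
```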