multimodalart (HF staff) committed on
Commit 743097a
1 Parent(s): f9bd415

Update app.py

Files changed (1): app.py (+4 -9)
app.py CHANGED
@@ -250,6 +250,7 @@ def generate_image(prompt, negative, face_emb, face_image, face_kps, image_stren
     st = time.time()
     pipe.unfuse_lora()
     pipe.unload_lora_weights()
+    pipe.unload_textual_inversion()
     et = time.time()
     elapsed_time = et - st
     print('Unfuse and unload LoRA took: ', elapsed_time, 'seconds')
@@ -266,15 +267,9 @@ def generate_image(prompt, negative, face_emb, face_image, face_kps, image_stren
     text_embedding_name = sdxl_loras[selected_state_index]["text_embedding_weights"]
     embedding_path = hf_hub_download(repo_id=repo_name, filename=text_embedding_name, repo_type="model")
     state_dict_embedding = load_file(embedding_path)
-    try:
-        pipe.unload_textual_inversion()
-        pipe.load_textual_inversion(state_dict_embedding["clip_l"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
-        pipe.load_textual_inversion(state_dict_embedding["clip_g"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
-    except:
-        pipe.unload_textual_inversion()
-        pipe.load_textual_inversion(state_dict_embedding["text_encoders_0"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
-        pipe.load_textual_inversion(state_dict_embedding["text_encoders_1"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
-
+    pipe.load_textual_inversion(state_dict_embedding["clip_l" if "clip_l" in state_dict_embedding else "text_encoders_0"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
+    pipe.load_textual_inversion(state_dict_embedding["clip_g" if "clip_g" in state_dict_embedding else "text_encoders_1"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
+
     print("Processing prompt...")
     st = time.time()
     conditioning, pooled = compel(prompt)
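
For reference, a minimal sketch (not part of the commit) of the pattern the new lines rely on: pick the embedding keys by checking which naming convention the file uses ("clip_l"/"clip_g" vs. "text_encoders_0"/"text_encoders_1") instead of probing with try/except. The helper name and standalone structure are illustrative; `pipe` is assumed to be an already-loaded SDXL pipeline with both text encoders.

# Illustrative sketch only; mirrors the commit's key-selection logic.
from safetensors.torch import load_file

def load_pivotal_embeddings(pipe, embedding_path):  # hypothetical helper
    state_dict = load_file(embedding_path)
    # Some exports name the per-encoder embeddings "clip_l"/"clip_g",
    # others "text_encoders_0"/"text_encoders_1"; pick whichever is present.
    key_l = "clip_l" if "clip_l" in state_dict else "text_encoders_0"
    key_g = "clip_g" if "clip_g" in state_dict else "text_encoders_1"
    pipe.unload_textual_inversion()  # clear embeddings left by a previous selection
    pipe.load_textual_inversion(state_dict[key_l], token=["<s0>", "<s1>"],
                                text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
    pipe.load_textual_inversion(state_dict[key_g], token=["<s0>", "<s1>"],
                                text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)

Compared with the try/except it replaces, the inline key check keeps the choice explicit and avoids masking unrelated errors raised while loading the embeddings.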