# notebooks/check_logits_with_serialization_peft_lora.py
# Scraped from the HF Hub file page (uploader: sayakpaul, HF staff;
# commit 18b1fea, "Upload check_logits_with_serialization_peft_lora.py";
# raw / history / blame / contribute / delete links; no virus; 4.05 kB).
# Converted to comments so the file parses as Python.
# SDXL: 0.613, 0.5566, 0.54, 0.4162, 0.4042, 0.4596, 0.5374, 0.5286, 0.5038
# SD: 0.5396, 0.5707, 0.477, 0.4665, 0.5419, 0.4594, 0.4857, 0.4741, 0.4804
import argparse

import torch
from diffusers import DiffusionPipeline
from diffusers.loaders import LoraLoaderMixin, StableDiffusionXLLoraLoaderMixin
from diffusers.utils import convert_state_dict_to_diffusers
from huggingface_hub import create_repo, upload_folder
from peft import LoraConfig
from peft.utils import get_peft_model_state_dict
# Maps each tiny test pipeline repo on the Hub to the repo that receives
# its serialized LoRA adapters when --push_to_hub is passed.
mapping = {
    "hf-internal-testing/tiny-sd-pipe": "hf-internal-testing/tiny-sd-lora-peft",
    "hf-internal-testing/tiny-sdxl-pipe": "hf-internal-testing/tiny-sdxl-lora-peft",
}
def load_pipeline(pipeline_id):
    """Load and return a diffusers pipeline from the Hub by repo id."""
    return DiffusionPipeline.from_pretrained(pipeline_id)
def get_lora_config(rank=4):
    """Build the LoRA configs for the text encoder(s) and the UNet.

    ``torch.manual_seed(0)`` is called before constructing each config so
    that ``init_lora_weights=False`` (random adapter init) yields
    reproducible weights — this is what makes the logit slices in the
    header comments stable across runs.

    Args:
        rank: LoRA rank, also used as ``lora_alpha``, for both adapters.
            Defaults to 4, the value previously hard-coded here.

    Returns:
        Tuple ``(text_lora_config, unet_lora_config)``.
    """
    torch.manual_seed(0)
    text_lora_config = LoraConfig(
        r=rank,
        lora_alpha=rank,
        target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
        init_lora_weights=False,
    )
    torch.manual_seed(0)
    unet_lora_config = LoraConfig(
        r=rank,
        lora_alpha=rank,
        target_modules=["to_q", "to_k", "to_v", "to_out.0"],
        init_lora_weights=False,
    )
    return text_lora_config, unet_lora_config
def get_dummy_inputs():
    """Return deterministic pipeline kwargs used for the logit check."""
    # Seeding yields a fixed torch.Generator, keeping outputs reproducible.
    generator = torch.manual_seed(0)
    return {
        "prompt": "A painting of a squirrel eating a burger",
        "num_inference_steps": 2,
        "guidance_scale": 6.0,
        "output_type": "np",
        "generator": generator,
    }
def run_inference(args):
    """Attach fresh LoRA adapters, print an output slice, optionally push.

    Runs the pipeline on deterministic dummy inputs and prints the
    flattened ``[0, -3:, -3:, -1]`` corner of the first output image (the
    values recorded in the header comments). With ``--push_to_hub``, the
    adapters are serialized in diffusers format and uploaded to the Hub
    repo given by ``mapping``.
    """
    pipe = load_pipeline(pipeline_id=args.pipeline_id)
    text_lora_config, unet_lora_config = get_lora_config()

    pipe.text_encoder.add_adapter(text_lora_config)
    pipe.unet.add_adapter(unet_lora_config)

    # SDXL pipelines carry a second text encoder; adapt it as well.
    has_two_text_encoders = hasattr(pipe, "text_encoder_2")
    if has_two_text_encoders:
        pipe.text_encoder_2.add_adapter(text_lora_config)

    images = pipe(**get_dummy_inputs()).images
    slice_values = images[0, -3:, -3:, -1].flatten().tolist()
    print(", ".join(str(round(v, 4)) for v in slice_values))

    if not args.push_to_hub:
        return

    # Convert the PEFT state dicts into the diffusers serialization format,
    # keyed by the save_lora_weights keyword each one feeds.
    lora_layers = {
        "unet_lora_layers": convert_state_dict_to_diffusers(
            get_peft_model_state_dict(pipe.unet)
        ),
        "text_encoder_lora_layers": convert_state_dict_to_diffusers(
            get_peft_model_state_dict(pipe.text_encoder)
        ),
    }
    if has_two_text_encoders:
        lora_layers["text_encoder_2_lora_layers"] = convert_state_dict_to_diffusers(
            get_peft_model_state_dict(pipe.text_encoder_2)
        )
        serialization_cls = StableDiffusionXLLoraLoaderMixin
    else:
        serialization_cls = LoraLoaderMixin

    output_dir = mapping[args.pipeline_id].split("/")[-1]
    serialization_cls.save_lora_weights(save_directory=output_dir, **lora_layers)

    repo_id = create_repo(repo_id=mapping[args.pipeline_id], exist_ok=True).repo_id
    upload_folder(repo_id=repo_id, folder_path=output_dir)
if __name__ == "__main__":
    # CLI: choose one of the tiny test pipelines and optionally push the
    # serialized LoRA adapters to the Hub.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pipeline_id",
        type=str,
        default="hf-internal-testing/tiny-sd-pipe",
        choices=[
            "hf-internal-testing/tiny-sd-pipe",
            "hf-internal-testing/tiny-sdxl-pipe",
        ],
    )
    parser.add_argument("--push_to_hub", action="store_true")
    run_inference(parser.parse_args())