sayakpaul committed
Commit
18b1fea
1 Parent(s): 4bbccf7

Upload check_logits_with_serialization_peft_lora.py

check_logits_with_serialization_peft_lora.py ADDED
# Expected output slices (printed by `run_inference` below for each pipeline):
# SDXL: 0.613, 0.5566, 0.54, 0.4162, 0.4042, 0.4596, 0.5374, 0.5286, 0.5038
# SD: 0.5396, 0.5707, 0.477, 0.4665, 0.5419, 0.4594, 0.4857, 0.4741, 0.4804

import argparse

import torch
from diffusers import DiffusionPipeline
from diffusers.loaders import LoraLoaderMixin, StableDiffusionXLLoraLoaderMixin
from diffusers.utils import convert_state_dict_to_diffusers
from huggingface_hub import create_repo, upload_folder
from peft import LoraConfig
from peft.utils import get_peft_model_state_dict


# Maps each tiny test pipeline to the Hub repo that hosts its serialized LoRA weights.
mapping = {
    "hf-internal-testing/tiny-sd-pipe": "hf-internal-testing/tiny-sd-lora-peft",
    "hf-internal-testing/tiny-sdxl-pipe": "hf-internal-testing/tiny-sdxl-lora-peft",
}


def load_pipeline(pipeline_id):
    pipe = DiffusionPipeline.from_pretrained(pipeline_id)
    return pipe


def get_lora_config():
    rank = 4

    torch.manual_seed(0)
    # `init_lora_weights=False` keeps the LoRA matrices randomly initialized
    # instead of zeroing `lora_B`, so the adapters alter the outputs right away.
    text_lora_config = LoraConfig(
        r=rank,
        lora_alpha=rank,
        target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
        init_lora_weights=False,
    )

    torch.manual_seed(0)
    unet_lora_config = LoraConfig(
        r=rank,
        lora_alpha=rank,
        target_modules=["to_q", "to_k", "to_v", "to_out.0"],
        init_lora_weights=False,
    )
    return text_lora_config, unet_lora_config
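
# (Illustrative sketch, not part of the original script.) Because the configs use
# `init_lora_weights=False`, the injected LoRA matrices are random rather than zero,
# which is what makes the slices above sensitive to the adapters. One way to eyeball
# this after `add_adapter` has run (assumes peft's standard `lora_B` attribute and
# the "default" adapter name):
#
#   for name, module in pipe.unet.named_modules():
#       if hasattr(module, "lora_B"):
#           print(name, module.lora_B["default"].weight.abs().sum())  # non-zero
#           break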


def get_dummy_inputs():
    pipeline_inputs = {
        "prompt": "A painting of a squirrel eating a burger",
        "num_inference_steps": 2,
        "guidance_scale": 6.0,
        "output_type": "np",
        "generator": torch.manual_seed(0),
    }
    return pipeline_inputs
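
# (Note, not part of the original script.) With `output_type="np"` the pipeline
# returns `.images` as a NumPy array of shape (batch, height, width, 3), which lets
# `run_inference` below slice raw pixel values directly; the seeded generator pins
# the initial latents so those values stay reproducible across runs.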


def run_inference(args):
    has_two_text_encoders = False
    pipe = load_pipeline(pipeline_id=args.pipeline_id)
    text_lora_config, unet_lora_config = get_lora_config()

    # Inject LoRA adapters into the text encoder(s) and the UNet.
    pipe.text_encoder.add_adapter(text_lora_config)
    pipe.unet.add_adapter(unet_lora_config)
    if hasattr(pipe, "text_encoder_2"):
        # SDXL pipelines carry a second text encoder.
        pipe.text_encoder_2.add_adapter(text_lora_config)
        has_two_text_encoders = True

    inputs = get_dummy_inputs()
    outputs = pipe(**inputs).images
    # Bottom-right 3x3 patch of the last channel of the first image; compare
    # against the expected slices at the top of this file.
    predicted_slice = outputs[0, -3:, -3:, -1].flatten().tolist()

    print(", ".join([str(round(x, 4)) for x in predicted_slice]))

    if args.push_to_hub:
        # Convert the peft-formatted state dicts to the key layout diffusers
        # expects when serializing LoRA weights.
        text_encoder_state_dict = convert_state_dict_to_diffusers(
            get_peft_model_state_dict(pipe.text_encoder)
        )
        unet_state_dict = convert_state_dict_to_diffusers(
            get_peft_model_state_dict(pipe.unet)
        )
        if has_two_text_encoders:
            text_encoder_2_state_dict = convert_state_dict_to_diffusers(
                get_peft_model_state_dict(pipe.text_encoder_2)
            )

        # `save_lora_weights` is a classmethod, so the loader mixin class can be
        # used directly without instantiating it.
        serialization_cls = (
            StableDiffusionXLLoraLoaderMixin
            if has_two_text_encoders
            else LoraLoaderMixin
        )
        output_dir = mapping[args.pipeline_id].split("/")[-1]

        if not has_two_text_encoders:
            serialization_cls.save_lora_weights(
                save_directory=output_dir,
                unet_lora_layers=unet_state_dict,
                text_encoder_lora_layers=text_encoder_state_dict,
            )
        else:
            serialization_cls.save_lora_weights(
                save_directory=output_dir,
                unet_lora_layers=unet_state_dict,
                text_encoder_lora_layers=text_encoder_state_dict,
                text_encoder_2_lora_layers=text_encoder_2_state_dict,
            )

        repo_id = create_repo(repo_id=mapping[args.pipeline_id], exist_ok=True).repo_id
        upload_folder(repo_id=repo_id, folder_path=output_dir)
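
# (Verification sketch, not part of the original script.) The pushed weights can be
# loaded back into a fresh pipeline to confirm the round trip reproduces the slice
# printed above, e.g. for the SD variant:
#
#   pipe = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-sd-pipe")
#   pipe.load_lora_weights("hf-internal-testing/tiny-sd-lora-peft")
#   images = pipe(**get_dummy_inputs()).images
#   print(images[0, -3:, -3:, -1].flatten())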


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pipeline_id",
        type=str,
        default="hf-internal-testing/tiny-sd-pipe",
        choices=[
            "hf-internal-testing/tiny-sd-pipe",
            "hf-internal-testing/tiny-sdxl-pipe",
        ],
    )
    parser.add_argument("--push_to_hub", action="store_true")
    args = parser.parse_args()

    run_inference(args)
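
# Example invocations (flags as defined above):
#
#   python check_logits_with_serialization_peft_lora.py
#   python check_logits_with_serialization_peft_lora.py \
#       --pipeline_id hf-internal-testing/tiny-sdxl-pipe --push_to_hub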