dezzman committed on
Commit
c202d23
·
verified ·
1 Parent(s): bfe43da

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -2
app.py CHANGED
@@ -26,6 +26,8 @@ def get_lora_sd_pipeline(
26
  before_params = pipe.unet.parameters()
27
  pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name)
28
  pipe.unet.set_adapter(adapter_name)
 
 
29
  after_params = pipe.unet.parameters()
30
  print("Parameters changed:", any(torch.any(b != a) for b, a in zip(before_params, after_params)))
31
 
@@ -87,8 +89,7 @@ def infer(
87
  prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
88
  print(f"LoRA adapter loaded: {pipe.unet.active_adapters}")
89
  print(f"LoRA scale applied: {lora_scale}")
90
- # pipe.fuse_lora(lora_scale=lora_scale)
91
- pipe.unet = pipe.unet.merge_and_unload(lora_scale=lora_scale)
92
 
93
  params = {
94
  'prompt_embeds': prompt_embeds,
 
26
  before_params = pipe.unet.parameters()
27
  pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name)
28
  pipe.unet.set_adapter(adapter_name)
29
+ pipe.load_lora_weights(os.path.join(unet_sub_dir, "adapter_model.safetensors"))
30
+ pipe.fuse_lora(lora_scale=0.4)
31
  after_params = pipe.unet.parameters()
32
  print("Parameters changed:", any(torch.any(b != a) for b, a in zip(before_params, after_params)))
33
 
 
89
  prompt_embeds, negative_prompt_embeds = align_embeddings(prompt_embeds, negative_prompt_embeds)
90
  print(f"LoRA adapter loaded: {pipe.unet.active_adapters}")
91
  print(f"LoRA scale applied: {lora_scale}")
92
+ pipe.fuse_lora(lora_scale=lora_scale)
 
93
 
94
  params = {
95
  'prompt_embeds': prompt_embeds,