@@ -1667,9 +1667,10 @@ def save_lora_weights(
         weight_name: str = None,
         save_function: Callable = None,
         safe_serialization: bool = True,
+        transformer_lora_adapter_metadata: Optional[dict] = None,
     ):
         r"""
-        Save the LoRA parameters corresponding to the UNet and text encoder.
+        Save the LoRA parameters corresponding to the transformer.

         Arguments:
             save_directory (`str` or `os.PathLike`):
@@ -1686,15 +1687,20 @@ def save_lora_weights(
                 `DIFFUSERS_SAVE_MODE`.
             safe_serialization (`bool`, *optional*, defaults to `True`):
                 Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
+            transformer_lora_adapter_metadata: TODO
         """
         state_dict = {}
+        lora_adapter_metadata = {}

         if not transformer_lora_layers:
             raise ValueError("You must pass `transformer_lora_layers`.")

         if transformer_lora_layers:
             state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name))

+        if transformer_lora_adapter_metadata:
+            lora_adapter_metadata.update(cls.pack_weights(transformer_lora_adapter_metadata, cls.transformer_name))
+
         # Save the model
         cls.write_lora_layers(
             state_dict=state_dict,
@@ -1703,6 +1709,7 @@ def save_lora_weights(
             weight_name=weight_name,
             save_function=save_function,
             safe_serialization=safe_serialization,
+            lora_adapter_metadata=lora_adapter_metadata,
         )

     # Copied from diffusers.loaders.lora_pipeline.SanaLoraLoaderMixin.fuse_lora
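For context, a minimal caller-side sketch of how the new argument might be used. The pipeline class, the placeholder layer dict, and the metadata keys are illustrative assumptions, not taken from this diff:

# Hedged sketch: assumes a pipeline whose LoRA loader mixin gains the new argument.
from diffusers import SanaPipeline

# Placeholder: a dict of LoRA layer tensors collected from the transformer.
transformer_lora_layers = ...

SanaPipeline.save_lora_weights(
    save_directory="./my-lora",
    transformer_lora_layers=transformer_lora_layers,
    # Assumed shape of the metadata dict: the adapter's LoRA config values.
    transformer_lora_adapter_metadata={"r": 16, "lora_alpha": 16},
)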
@@ -2985,9 +2992,10 @@ def save_lora_weights(
         weight_name: str = None,
         save_function: Callable = None,
         safe_serialization: bool = True,
+        transformer_lora_adapter_metadata: Optional[dict] = None,
     ):
         r"""
-        Save the LoRA parameters corresponding to the UNet and text encoder.
+        Save the LoRA parameters corresponding to the transformer.

         Arguments:
             save_directory (`str` or `os.PathLike`):
@@ -3004,15 +3012,20 @@ def save_lora_weights(
                 `DIFFUSERS_SAVE_MODE`.
             safe_serialization (`bool`, *optional*, defaults to `True`):
                 Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
+            transformer_lora_adapter_metadata: TODO
         """
         state_dict = {}
+        lora_adapter_metadata = {}

         if not transformer_lora_layers:
             raise ValueError("You must pass `transformer_lora_layers`.")

         if transformer_lora_layers:
             state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name))

+        if transformer_lora_adapter_metadata:
+            lora_adapter_metadata.update(cls.pack_weights(transformer_lora_adapter_metadata, cls.transformer_name))
+
         # Save the model
         cls.write_lora_layers(
             state_dict=state_dict,
@@ -3021,6 +3034,7 @@ def save_lora_weights(
             weight_name=weight_name,
             save_function=save_function,
             safe_serialization=safe_serialization,
+            lora_adapter_metadata=lora_adapter_metadata,
         )

     def fuse_lora(
@@ -3302,9 +3316,10 @@ def save_lora_weights(
         weight_name: str = None,
         save_function: Callable = None,
         safe_serialization: bool = True,
+        transformer_lora_adapter_metadata: Optional[dict] = None,
     ):
         r"""
-        Save the LoRA parameters corresponding to the UNet and text encoder.
+        Save the LoRA parameters corresponding to the transformer.

         Arguments:
             save_directory (`str` or `os.PathLike`):
@@ -3321,15 +3336,20 @@ def save_lora_weights(
                 `DIFFUSERS_SAVE_MODE`.
             safe_serialization (`bool`, *optional*, defaults to `True`):
                 Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
+            transformer_lora_adapter_metadata: TODO
         """
         state_dict = {}
+        lora_adapter_metadata = {}

         if not transformer_lora_layers:
             raise ValueError("You must pass `transformer_lora_layers`.")

         if transformer_lora_layers:
             state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name))

+        if transformer_lora_adapter_metadata:
+            lora_adapter_metadata.update(cls.pack_weights(transformer_lora_adapter_metadata, cls.transformer_name))
+
         # Save the model
         cls.write_lora_layers(
             state_dict=state_dict,
@@ -3338,6 +3358,7 @@ def save_lora_weights(
             weight_name=weight_name,
             save_function=save_function,
             safe_serialization=safe_serialization,
+            lora_adapter_metadata=lora_adapter_metadata,
         )

     # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.fuse_lora
@@ -3621,9 +3642,10 @@ def save_lora_weights(
         weight_name: str = None,
         save_function: Callable = None,
         safe_serialization: bool = True,
+        transformer_lora_adapter_metadata: Optional[dict] = None,
     ):
         r"""
-        Save the LoRA parameters corresponding to the UNet and text encoder.
+        Save the LoRA parameters corresponding to the transformer.

         Arguments:
             save_directory (`str` or `os.PathLike`):
@@ -3640,15 +3662,20 @@ def save_lora_weights(
                 `DIFFUSERS_SAVE_MODE`.
             safe_serialization (`bool`, *optional*, defaults to `True`):
                 Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
+            transformer_lora_adapter_metadata: TODO
         """
         state_dict = {}
+        lora_adapter_metadata = {}

         if not transformer_lora_layers:
             raise ValueError("You must pass `transformer_lora_layers`.")

         if transformer_lora_layers:
             state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name))

+        if transformer_lora_adapter_metadata:
+            lora_adapter_metadata.update(cls.pack_weights(transformer_lora_adapter_metadata, cls.transformer_name))
+
         # Save the model
         cls.write_lora_layers(
             state_dict=state_dict,
@@ -3657,6 +3684,7 @@ def save_lora_weights(
             weight_name=weight_name,
             save_function=save_function,
             safe_serialization=safe_serialization,
+            lora_adapter_metadata=lora_adapter_metadata,
         )

     # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.fuse_lora
@@ -3940,9 +3968,10 @@ def save_lora_weights(
         weight_name: str = None,
         save_function: Callable = None,
         safe_serialization: bool = True,
+        transformer_lora_adapter_metadata: Optional[dict] = None,
     ):
         r"""
-        Save the LoRA parameters corresponding to the UNet and text encoder.
+        Save the LoRA parameters corresponding to the transformer.

         Arguments:
             save_directory (`str` or `os.PathLike`):
@@ -3959,15 +3988,20 @@ def save_lora_weights(
                 `DIFFUSERS_SAVE_MODE`.
             safe_serialization (`bool`, *optional*, defaults to `True`):
                 Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
+            transformer_lora_adapter_metadata: TODO
         """
         state_dict = {}
+        lora_adapter_metadata = {}

         if not transformer_lora_layers:
             raise ValueError("You must pass `transformer_lora_layers`.")

         if transformer_lora_layers:
             state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name))

+        if transformer_lora_adapter_metadata:
+            lora_adapter_metadata.update(cls.pack_weights(transformer_lora_adapter_metadata, cls.transformer_name))
+
         # Save the model
         cls.write_lora_layers(
             state_dict=state_dict,
@@ -3976,6 +4010,7 @@ def save_lora_weights(
             weight_name=weight_name,
             save_function=save_function,
             safe_serialization=safe_serialization,
+            lora_adapter_metadata=lora_adapter_metadata,
         )

     # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.fuse_lora
@@ -4262,9 +4297,10 @@ def save_lora_weights(
         weight_name: str = None,
         save_function: Callable = None,
         safe_serialization: bool = True,
+        transformer_lora_adapter_metadata: Optional[dict] = None,
     ):
         r"""
-        Save the LoRA parameters corresponding to the UNet and text encoder.
+        Save the LoRA parameters corresponding to the transformer.

         Arguments:
             save_directory (`str` or `os.PathLike`):
@@ -4281,15 +4317,20 @@ def save_lora_weights(
                 `DIFFUSERS_SAVE_MODE`.
             safe_serialization (`bool`, *optional*, defaults to `True`):
                 Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
+            transformer_lora_adapter_metadata: TODO
         """
         state_dict = {}
+        lora_adapter_metadata = {}

         if not transformer_lora_layers:
             raise ValueError("You must pass `transformer_lora_layers`.")

         if transformer_lora_layers:
             state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name))

+        if transformer_lora_adapter_metadata:
+            lora_adapter_metadata.update(cls.pack_weights(transformer_lora_adapter_metadata, cls.transformer_name))
+
         # Save the model
         cls.write_lora_layers(
             state_dict=state_dict,
@@ -4298,6 +4339,7 @@ def save_lora_weights(
             weight_name=weight_name,
             save_function=save_function,
             safe_serialization=safe_serialization,
+            lora_adapter_metadata=lora_adapter_metadata,
         )

     # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.fuse_lora
@@ -4585,9 +4627,10 @@ def save_lora_weights(
         weight_name: str = None,
         save_function: Callable = None,
         safe_serialization: bool = True,
+        transformer_lora_adapter_metadata: Optional[dict] = None,
     ):
         r"""
-        Save the LoRA parameters corresponding to the UNet and text encoder.
+        Save the LoRA parameters corresponding to the transformer.

         Arguments:
             save_directory (`str` or `os.PathLike`):
@@ -4604,15 +4647,20 @@ def save_lora_weights(
                 `DIFFUSERS_SAVE_MODE`.
             safe_serialization (`bool`, *optional*, defaults to `True`):
                 Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
+            transformer_lora_adapter_metadata: TODO
         """
         state_dict = {}
+        lora_adapter_metadata = {}

         if not transformer_lora_layers:
             raise ValueError("You must pass `transformer_lora_layers`.")

         if transformer_lora_layers:
             state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name))

+        if transformer_lora_adapter_metadata:
+            lora_adapter_metadata.update(cls.pack_weights(transformer_lora_adapter_metadata, cls.transformer_name))
+
         # Save the model
         cls.write_lora_layers(
             state_dict=state_dict,
@@ -4621,6 +4669,7 @@ def save_lora_weights(
             weight_name=weight_name,
             save_function=save_function,
             safe_serialization=safe_serialization,
+            lora_adapter_metadata=lora_adapter_metadata,
         )

     # Copied from diffusers.loaders.lora_pipeline.SanaLoraLoaderMixin.fuse_lora
@@ -4890,13 +4939,7 @@ def load_lora_weights(
     @classmethod
     # Copied from diffusers.loaders.lora_pipeline.SD3LoraLoaderMixin.load_lora_into_transformer with SD3Transformer2DModel->WanTransformer3DModel
     def load_lora_into_transformer(
-        cls,
-        state_dict,
-        transformer,
-        adapter_name=None,
-        _pipeline=None,
-        low_cpu_mem_usage=False,
-        hotswap: bool = False,
+        cls, state_dict, transformer, adapter_name=None, _pipeline=None, low_cpu_mem_usage=False, hotswap: bool = False
     ):
         """
         This will load the LoRA layers specified in `state_dict` into `transformer`.
@@ -4946,7 +4989,7 @@ def save_lora_weights(
         transformer_lora_adapter_metadata: Optional[dict] = None,
     ):
         r"""
-        Save the LoRA parameters corresponding to the UNet and text encoder.
+        Save the LoRA parameters corresponding to the transformer.

         Arguments:
             save_directory (`str` or `os.PathLike`):
@@ -5269,9 +5312,10 @@ def save_lora_weights(
         weight_name: str = None,
         save_function: Callable = None,
         safe_serialization: bool = True,
+        transformer_lora_adapter_metadata: Optional[dict] = None,
     ):
         r"""
-        Save the LoRA parameters corresponding to the UNet and text encoder.
+        Save the LoRA parameters corresponding to the transformer.

         Arguments:
             save_directory (`str` or `os.PathLike`):
@@ -5288,15 +5332,20 @@ def save_lora_weights(
                 `DIFFUSERS_SAVE_MODE`.
             safe_serialization (`bool`, *optional*, defaults to `True`):
                 Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
+            transformer_lora_adapter_metadata: TODO
         """
         state_dict = {}
+        lora_adapter_metadata = {}

         if not transformer_lora_layers:
             raise ValueError("You must pass `transformer_lora_layers`.")

         if transformer_lora_layers:
             state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name))

+        if transformer_lora_adapter_metadata:
+            lora_adapter_metadata.update(cls.pack_weights(transformer_lora_adapter_metadata, cls.transformer_name))
+
         # Save the model
         cls.write_lora_layers(
             state_dict=state_dict,
@@ -5305,6 +5354,7 @@ def save_lora_weights(
             weight_name=weight_name,
             save_function=save_function,
             safe_serialization=safe_serialization,
+            lora_adapter_metadata=lora_adapter_metadata,
         )

     # Copied from diffusers.loaders.lora_pipeline.CogVideoXLoraLoaderMixin.fuse_lora
@@ -5588,9 +5638,10 @@ def save_lora_weights(
         weight_name: str = None,
         save_function: Callable = None,
         safe_serialization: bool = True,
+        transformer_lora_adapter_metadata: Optional[dict] = None,
     ):
         r"""
-        Save the LoRA parameters corresponding to the UNet and text encoder.
+        Save the LoRA parameters corresponding to the transformer.

         Arguments:
             save_directory (`str` or `os.PathLike`):
@@ -5607,15 +5658,20 @@ def save_lora_weights(
                 `DIFFUSERS_SAVE_MODE`.
             safe_serialization (`bool`, *optional*, defaults to `True`):
                 Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
+            transformer_lora_adapter_metadata: TODO
         """
         state_dict = {}
+        lora_adapter_metadata = {}

         if not transformer_lora_layers:
             raise ValueError("You must pass `transformer_lora_layers`.")

         if transformer_lora_layers:
             state_dict.update(cls.pack_weights(transformer_lora_layers, cls.transformer_name))

+        if transformer_lora_adapter_metadata:
+            lora_adapter_metadata.update(cls.pack_weights(transformer_lora_adapter_metadata, cls.transformer_name))
+
         # Save the model
         cls.write_lora_layers(
             state_dict=state_dict,
@@ -5624,6 +5680,7 @@ def save_lora_weights(
             weight_name=weight_name,
             save_function=save_function,
             safe_serialization=safe_serialization,
+            lora_adapter_metadata=lora_adapter_metadata,
        )

     # Copied from diffusers.loaders.lora_pipeline.SanaLoraLoaderMixin.fuse_lora