# extract an approximating LoRA by SVD from the difference of two SD models
# The code is based on https://github.com/cloneofsimo/lora/blob/develop/lora_diffusion/cli_svd.py
# Thanks to cloneofsimo!
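#
# Usage example (illustrative; the file paths are placeholders):
#   python extract_lora_from_models.py --model_org base.ckpt --model_tuned finetuned.safetensors \
#       --save_to extracted_lora.safetensors --dim 8 --device cuda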

import argparse
import os

import torch
from safetensors.torch import load_file, save_file
from tqdm import tqdm

import library.model_util as model_util
import lora

CLAMP_QUANTILE = 0.99  # quantile used to clamp outliers in the extracted LoRA weights
MIN_DIFF = 1e-6  # text encoder diffs smaller than this are treated as "no change"


def save_to_file(file_name, state_dict, dtype):
    # optionally convert all tensors to the requested dtype before saving
    if dtype is not None:
        for key in list(state_dict.keys()):
            if isinstance(state_dict[key], torch.Tensor):
                state_dict[key] = state_dict[key].to(dtype)

    if os.path.splitext(file_name)[1] == '.safetensors':
        save_file(state_dict, file_name)
    else:
        torch.save(state_dict, file_name)


def svd(args):
    def str_to_dtype(p):
        if p == 'float':
            return torch.float
        if p == 'fp16':
            return torch.float16
        if p == 'bf16':
            return torch.bfloat16
        return None

    save_dtype = str_to_dtype(args.save_precision)

    print(f"loading SD model : {args.model_org}")
    text_encoder_o, _, unet_o = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.model_org)
    print(f"loading SD model : {args.model_tuned}")
    text_encoder_t, _, unet_t = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, args.model_tuned)

    # create LoRA networks to enumerate the target modules; dim (rank) is also used as alpha
    if args.conv_dim is None:
        kwargs = {}
    else:
        kwargs = {"conv_dim": args.conv_dim, "conv_alpha": args.conv_dim}

    lora_network_o = lora.create_network(1.0, args.dim, args.dim, None, text_encoder_o, unet_o, **kwargs)
    lora_network_t = lora.create_network(1.0, args.dim, args.dim, None, text_encoder_t, unet_t, **kwargs)
    assert len(lora_network_o.text_encoder_loras) == len(
        lora_network_t.text_encoder_loras), "model versions are different (SD1.x vs SD2.x)"

    # compute the weight diffs for the text encoder
    diffs = {}
    text_encoder_different = False
    for i, (lora_o, lora_t) in enumerate(zip(lora_network_o.text_encoder_loras, lora_network_t.text_encoder_loras)):
        lora_name = lora_o.lora_name
        module_o = lora_o.org_module
        module_t = lora_t.org_module
        diff = module_t.weight - module_o.weight

        # the two text encoders may be identical (e.g. a U-Net-only fine-tune)
        if torch.max(torch.abs(diff)) > MIN_DIFF:
            text_encoder_different = True

        diff = diff.float()
        diffs[lora_name] = diff

    if not text_encoder_different:
        print("Text encoder is same. Extract U-Net only.")
        lora_network_o.text_encoder_loras = []
        diffs = {}

    # compute the weight diffs for the U-Net
    for i, (lora_o, lora_t) in enumerate(zip(lora_network_o.unet_loras, lora_network_t.unet_loras)):
        lora_name = lora_o.lora_name
        module_o = lora_o.org_module
        module_t = lora_t.org_module
        diff = module_t.weight - module_o.weight
        diff = diff.float()

        if args.device:
            diff = diff.to(args.device)

        diffs[lora_name] = diff

    # make LoRA with svd
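    # Each diff is factorized as diff ≈ (U_r @ diag(S_r)) @ V_r^T with r = rank;
    # U_r @ diag(S_r) becomes lora_up and V_r^T becomes lora_down. Because alpha is
    # set equal to the rank below, the standard LoRA update
    # W' = W + (alpha / rank) * lora_up @ lora_down applies this approximation with scale 1.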
    print("calculating by svd")
    lora_weights = {}
    with torch.no_grad():
        for lora_name, mat in tqdm(list(diffs.items())):
            # if args.conv_dim is None, diffs do not include LoRAs for conv2d-3x3
            conv2d = (len(mat.size()) == 4)
            kernel_size = None if not conv2d else mat.size()[2:4]
            conv2d_3x3 = conv2d and kernel_size != (1, 1)

            rank = args.dim if not conv2d_3x3 or args.conv_dim is None else args.conv_dim
            out_dim, in_dim = mat.size()[0:2]

            if args.device:
                mat = mat.to(args.device)

            # print(lora_name, mat.size(), mat.device, rank, in_dim, out_dim)
            rank = min(rank, in_dim, out_dim)  # LoRA rank cannot exceed the original dim

            # flatten conv kernels to 2D so that SVD can be applied
            if conv2d:
                if conv2d_3x3:
                    mat = mat.flatten(start_dim=1)
                else:
                    mat = mat.squeeze()

            U, S, Vh = torch.linalg.svd(mat)

            # keep the top-`rank` singular components and fold the singular values into U
            U = U[:, :rank]
            S = S[:rank]
            U = U @ torch.diag(S)
            Vh = Vh[:rank, :]

            # clamp outliers: hi_val is the CLAMP_QUANTILE quantile of all extracted entries
            dist = torch.cat([U.flatten(), Vh.flatten()])
            hi_val = torch.quantile(dist, CLAMP_QUANTILE)
            low_val = -hi_val

            U = U.clamp(low_val, hi_val)
            Vh = Vh.clamp(low_val, hi_val)

            # restore conv shapes: lora_up is a 1x1 conv, lora_down carries the original kernel
            if conv2d:
                U = U.reshape(out_dim, rank, 1, 1)
                Vh = Vh.reshape(rank, in_dim, kernel_size[0], kernel_size[1])

            U = U.to("cpu").contiguous()
            Vh = Vh.to("cpu").contiguous()

            lora_weights[lora_name] = (U, Vh)

    # make the state dict for the LoRA; alpha is set to the rank
    lora_sd = {}
    for lora_name, (up_weight, down_weight) in lora_weights.items():
        lora_sd[lora_name + '.lora_up.weight'] = up_weight
        lora_sd[lora_name + '.lora_down.weight'] = down_weight
        lora_sd[lora_name + '.alpha'] = torch.tensor(down_weight.size()[0])

    # load the state dict into a LoRA network and save it
    lora_network_save, lora_sd = lora.create_network_from_weights(1.0, None, None, text_encoder_o, unet_o, weights_sd=lora_sd)
    lora_network_save.apply_to(text_encoder_o, unet_o)  # create internal module references for state_dict
    info = lora_network_save.load_state_dict(lora_sd)
    print(f"Loading extracted LoRA weights: {info}")

    dir_name = os.path.dirname(args.save_to)
    if dir_name and not os.path.exists(dir_name):
        os.makedirs(dir_name, exist_ok=True)

    # minimum metadata
    metadata = {"ss_network_module": "networks.lora", "ss_network_dim": str(args.dim), "ss_network_alpha": str(args.dim)}

    lora_network_save.save_weights(args.save_to, save_dtype, metadata)
    print(f"LoRA weights are saved to: {args.save_to}")
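

# A minimal sanity-check sketch (a hypothetical helper, for illustration only):
# recompose each low-rank delta from a saved LoRA file and report its rank, alpha and norm.
# It assumes the key layout written above ('.lora_up.weight' / '.lora_down.weight' / '.alpha').
def inspect_lora(file_name):
    sd = load_file(file_name)
    for key in sorted(sd.keys()):
        if not key.endswith('.lora_up.weight'):
            continue
        base = key[: -len('.lora_up.weight')]
        # flatten conv weights to 2D, matching the factorization used in svd()
        up = sd[key].float().flatten(start_dim=1)
        down = sd[base + '.lora_down.weight'].float().flatten(start_dim=1)
        delta = up @ down  # rank-r approximation of module_t.weight - module_o.weight
        print(f"{base}: rank={down.size()[0]}, alpha={sd[base + '.alpha'].item()}, |delta|={delta.norm().item():.4f}")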


def setup_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser()
    parser.add_argument("--v2", action='store_true',
                        help="load Stable Diffusion v2.x model")
    parser.add_argument("--save_precision", type=str, default=None,
                        choices=[None, "float", "fp16", "bf16"], help="precision in saving; float if omitted")
    parser.add_argument("--model_org", type=str, default=None,
                        help="original Stable Diffusion model: ckpt or safetensors file")
    parser.add_argument("--model_tuned", type=str, default=None,
                        help="tuned Stable Diffusion model; the extracted LoRA is the difference from original to tuned: ckpt or safetensors file")
    parser.add_argument("--save_to", type=str, default=None,
                        help="destination file name: ckpt or safetensors file")
    parser.add_argument("--dim", type=int, default=4, help="dimension (rank) of LoRA (default 4)")
    parser.add_argument("--conv_dim", type=int, default=None,
                        help="dimension (rank) of LoRA for Conv2d-3x3 (default None, disabled)")
    parser.add_argument("--device", type=str, default=None, help="device to use; 'cuda' to use the GPU")
    return parser


if __name__ == '__main__':
    parser = setup_parser()
    args = parser.parse_args()
    svd(args)