resize_lora.py

# Convert a LoRA to a different rank approximation (intended for going to a lower rank).
# This code is based on the extract_lora_from_models.py file, which is in turn based on
# https://github.com/cloneofsimo/lora/blob/develop/lora_diffusion/cli_svd.py
# Thanks to cloneofsimo
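#
# Typical fixed-rank invocation (illustrative; all flags are defined in
# setup_parser at the bottom of this file):
#   python resize_lora.py --model in.safetensors --save_to out.safetensors \
#       --new_rank 8 --save_precision fp16 --device cuda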
import argparse

import numpy as np
import torch
from safetensors.torch import load_file, save_file, safe_open
from tqdm import tqdm

from library import train_util, model_util

# Singular values at or below this threshold are treated as zero when resizing
MIN_SV = 1e-6
# Model save and load functions
def load_state_dict(file_name, dtype):
    if model_util.is_safetensors(file_name):
        sd = load_file(file_name)
        # load_file does not return metadata, so read it separately via safe_open
        with safe_open(file_name, framework="pt") as f:
            metadata = f.metadata()
    else:
        sd = torch.load(file_name, map_location='cpu')
        metadata = None

    for key in list(sd.keys()):
        if isinstance(sd[key], torch.Tensor):
            sd[key] = sd[key].to(dtype)

    return sd, metadata
def save_to_file(file_name, state_dict, dtype, metadata):
    if dtype is not None:
        for key in list(state_dict.keys()):
            if isinstance(state_dict[key], torch.Tensor):
                state_dict[key] = state_dict[key].to(dtype)

    if model_util.is_safetensors(file_name):
        save_file(state_dict, file_name, metadata)
    else:
        torch.save(state_dict, file_name)
# Indexing functions

def index_sv_cumulative(S, target):
    # Smallest index whose normalized cumulative sum of singular values reaches target
    original_sum = float(torch.sum(S))
    cumulative_sums = torch.cumsum(S, dim=0) / original_sum
    index = int(torch.searchsorted(cumulative_sums, target)) + 1
    index = max(1, min(index, len(S) - 1))
    return index


def index_sv_fro(S, target):
    # Smallest index that retains the target fraction of the Frobenius norm
    S_squared = S.pow(2)
    s_fro_sq = float(torch.sum(S_squared))
    sum_S_squared = torch.cumsum(S_squared, dim=0) / s_fro_sq
    index = int(torch.searchsorted(sum_S_squared, target ** 2)) + 1
    index = max(1, min(index, len(S) - 1))
    return index


def index_sv_ratio(S, target):
    # Number of singular values within a factor of target of the largest one
    max_sv = S[0]
    min_sv = max_sv / target
    index = int(torch.sum(S > min_sv).item())
    index = max(1, min(index, len(S) - 1))
    return index
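
# Hand-computed illustration of the three criteria, for S = [4.0, 2.0, 1.0, 0.5]:
#   index_sv_cumulative(S, 0.90) -> 3  # first 3 values cover 7.0/7.5 ~ 93% of sum(S)
#   index_sv_fro(S, 0.95)        -> 2  # sqrt((16+4)/21.25) ~ 0.97 of the Frobenius norm
#   index_sv_ratio(S, 4)         -> 2  # only 4.0 and 2.0 lie strictly above S[0]/4 = 1.0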
# Modified from Kohaku-blueleaf's extract/merge functions
def extract_conv(weight, lora_rank, dynamic_method, dynamic_param, device, scale=1):
    out_size, in_size, kernel_size, _ = weight.size()
    U, S, Vh = torch.linalg.svd(weight.reshape(out_size, -1).to(device))

    param_dict = rank_resize(S, lora_rank, dynamic_method, dynamic_param, scale)
    lora_rank = param_dict["new_rank"]

    # Truncate to the new rank and fold the singular values into U
    U = U[:, :lora_rank]
    S = S[:lora_rank]
    U = U @ torch.diag(S)
    Vh = Vh[:lora_rank, :]

    param_dict["lora_down"] = Vh.reshape(lora_rank, in_size, kernel_size, kernel_size).cpu()
    param_dict["lora_up"] = U.reshape(out_size, lora_rank, 1, 1).cpu()
    del U, S, Vh, weight
    return param_dict
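
# The truncated SVD above gives the best rank-r approximation of the merged weight
# in the Frobenius norm (Eckart-Young): W ~ (U @ diag(S)) @ Vh = lora_up @ lora_down.
# For conv layers the down weight keeps the spatial kernel while the up weight is
# a 1x1 conv, matching the usual LoRA convention for Conv2d modules.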
def extract_linear(weight, lora_rank, dynamic_method, dynamic_param, device, scale=1):
    out_size, in_size = weight.size()
    U, S, Vh = torch.linalg.svd(weight.to(device))

    param_dict = rank_resize(S, lora_rank, dynamic_method, dynamic_param, scale)
    lora_rank = param_dict["new_rank"]

    U = U[:, :lora_rank]
    S = S[:lora_rank]
    U = U @ torch.diag(S)
    Vh = Vh[:lora_rank, :]

    param_dict["lora_down"] = Vh.reshape(lora_rank, in_size).cpu()
    param_dict["lora_up"] = U.reshape(out_size, lora_rank).cpu()
    del U, S, Vh, weight
    return param_dict
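
# Minimal sketch of a fixed-rank extraction (hypothetical shapes, no dynamic method):
#   W = torch.randn(320, 768)                     # a merged delta weight
#   pd = extract_linear(W, 8, None, None, "cpu")
#   approx = pd["lora_up"] @ pd["lora_down"]      # best rank-8 approximation of W
#   pd["new_rank"], pd["new_alpha"]               # -> 8, 8.0 (with the default scale=1)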
def merge_conv(lora_down, lora_up, device):
    in_rank, in_size, kernel_size, k_ = lora_down.shape
    out_size, out_rank, _, _ = lora_up.shape
    assert in_rank == out_rank and kernel_size == k_, f"rank {in_rank} {out_rank} or kernel {kernel_size} {k_} mismatch"

    lora_down = lora_down.to(device)
    lora_up = lora_up.to(device)

    merged = lora_up.reshape(out_size, -1) @ lora_down.reshape(in_rank, -1)
    weight = merged.reshape(out_size, in_size, kernel_size, kernel_size)
    del lora_up, lora_down
    return weight


def merge_linear(lora_down, lora_up, device):
    in_rank, in_size = lora_down.shape
    out_size, out_rank = lora_up.shape
    assert in_rank == out_rank, f"rank {in_rank} {out_rank} mismatch"

    lora_down = lora_down.to(device)
    lora_up = lora_up.to(device)

    weight = lora_up @ lora_down
    del lora_up, lora_down
    return weight
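
# Merging reconstructs the full delta weight (lora_up @ lora_down) so it can be
# re-factorized at a lower rank by extract_conv/extract_linear. The alpha/rank
# multiplier is not baked into the merged weight here; rank_resize preserves it
# by emitting new_alpha = scale * new_rank.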
# Calculate new rank
def rank_resize(S, rank, dynamic_method, dynamic_param, scale=1):
    param_dict = {}

    if dynamic_method == "sv_ratio":
        # Calculate new dim and alpha based on the max:min singular value ratio
        new_rank = index_sv_ratio(S, dynamic_param) + 1
        new_alpha = float(scale * new_rank)
    elif dynamic_method == "sv_cumulative":
        # Calculate new dim and alpha based on the cumulative sum of singular values
        new_rank = index_sv_cumulative(S, dynamic_param) + 1
        new_alpha = float(scale * new_rank)
    elif dynamic_method == "sv_fro":
        # Calculate new dim and alpha based on the retained Frobenius norm (sqrt of sum of squares)
        new_rank = index_sv_fro(S, dynamic_param) + 1
        new_alpha = float(scale * new_rank)
    else:
        new_rank = rank
        new_alpha = float(scale * new_rank)

    if S[0] <= MIN_SV:  # Zero matrix, set dim to 1
        new_rank = 1
        new_alpha = float(scale * new_rank)
    elif new_rank > rank:  # cap the new rank at the requested max rank
        new_rank = rank
        new_alpha = float(scale * new_rank)

    # Calculate resize info
    s_sum = torch.sum(torch.abs(S))
    s_rank = torch.sum(torch.abs(S[:new_rank]))

    S_squared = S.pow(2)
    s_fro = torch.sqrt(torch.sum(S_squared))
    s_red_fro = torch.sqrt(torch.sum(S_squared[:new_rank]))
    fro_percent = float(s_red_fro / s_fro)

    param_dict["new_rank"] = new_rank
    param_dict["new_alpha"] = new_alpha
    param_dict["sum_retained"] = s_rank / s_sum
    param_dict["fro_retained"] = fro_percent
    param_dict["max_ratio"] = S[0] / S[new_rank - 1]

    return param_dict
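
# Because new_alpha = scale * new_rank and scale = old_alpha / old_rank, the
# effective multiplier alpha/rank is unchanged by the resize. For example, a
# block trained with alpha 16 at rank 32 (scale 0.5) resized to rank 8 gets alpha 4.0.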
def resize_lora_model(lora_sd, new_rank, save_dtype, device, dynamic_method, dynamic_param, verbose):
    network_alpha = None
    network_dim = None
    verbose_str = "\n"
    fro_list = []

    # Extract the loaded LoRA's dim and alpha
    for key, value in lora_sd.items():
        if network_alpha is None and 'alpha' in key:
            network_alpha = value
        if network_dim is None and 'lora_down' in key and len(value.size()) == 2:
            network_dim = value.size()[0]
        if network_alpha is not None and network_dim is not None:
            break

    if network_alpha is None:
        network_alpha = network_dim

    scale = network_alpha / network_dim
    if dynamic_method:
        print(f"Dynamically determining new alphas and dims based on {dynamic_method}: {dynamic_param}, max rank is {new_rank}")

    lora_down_weight = None
    lora_up_weight = None

    o_lora_sd = lora_sd.copy()
    block_down_name = None
    block_up_name = None

    with torch.no_grad():
        for key, value in tqdm(lora_sd.items()):
            weight_name = None
            if 'lora_down' in key:
                block_down_name = key.split(".")[0]
                weight_name = key.split(".")[-1]
                lora_down_weight = value
            else:
                continue

            # find the corresponding lora_up and alpha
            block_up_name = block_down_name
            lora_up_weight = lora_sd.get(block_up_name + '.lora_up.' + weight_name, None)
            lora_alpha = lora_sd.get(block_down_name + '.alpha', None)

            weights_loaded = (lora_down_weight is not None and lora_up_weight is not None)

            if weights_loaded:
                conv2d = (len(lora_down_weight.size()) == 4)
                if lora_alpha is None:
                    scale = 1.0
                else:
                    scale = lora_alpha / lora_down_weight.size()[0]

                if conv2d:
                    full_weight_matrix = merge_conv(lora_down_weight, lora_up_weight, device)
                    param_dict = extract_conv(full_weight_matrix, new_rank, dynamic_method, dynamic_param, device, scale)
                else:
                    full_weight_matrix = merge_linear(lora_down_weight, lora_up_weight, device)
                    param_dict = extract_linear(full_weight_matrix, new_rank, dynamic_method, dynamic_param, device, scale)

                if verbose:
                    max_ratio = param_dict['max_ratio']
                    sum_retained = param_dict['sum_retained']
                    fro_retained = param_dict['fro_retained']
                    if not np.isnan(fro_retained):
                        fro_list.append(float(fro_retained))

                    verbose_str += f"{block_down_name:75} | "
                    verbose_str += f"sum(S) retained: {sum_retained:.1%}, fro retained: {fro_retained:.1%}, max(S) ratio: {max_ratio:0.1f}"

                if verbose and dynamic_method:
                    verbose_str += f", dynamic | dim: {param_dict['new_rank']}, alpha: {param_dict['new_alpha']}\n"
                else:
                    verbose_str += "\n"

                new_alpha = param_dict['new_alpha']
                o_lora_sd[block_down_name + ".lora_down.weight"] = param_dict["lora_down"].to(save_dtype).contiguous()
                o_lora_sd[block_up_name + ".lora_up.weight"] = param_dict["lora_up"].to(save_dtype).contiguous()
                o_lora_sd[block_up_name + ".alpha"] = torch.tensor(param_dict['new_alpha']).to(save_dtype)

                block_down_name = None
                block_up_name = None
                lora_down_weight = None
                lora_up_weight = None
                weights_loaded = False
                del param_dict

    if verbose:
        print(verbose_str)
        print(f"Average Frobenius norm retention: {np.mean(fro_list):.2%} | std: {np.std(fro_list):0.3f}")
    print("resizing complete")

    # Note: the returned new_alpha is the alpha of the last block processed
    return o_lora_sd, network_dim, new_alpha
def resize(args):
    def str_to_dtype(p):
        if p == 'float':
            return torch.float
        if p == 'fp16':
            return torch.float16
        if p == 'bf16':
            return torch.bfloat16
        return None

    if args.dynamic_method and not args.dynamic_param:
        raise Exception("If using dynamic_method, then dynamic_param is required")

    merge_dtype = str_to_dtype('float')  # the matmul method above only seems to work in float32
    save_dtype = str_to_dtype(args.save_precision)
    if save_dtype is None:
        save_dtype = merge_dtype

    print("Loading model...")
    lora_sd, metadata = load_state_dict(args.model, merge_dtype)

    print("Resizing LoRA...")
    state_dict, old_dim, new_alpha = resize_lora_model(lora_sd, args.new_rank, save_dtype, args.device, args.dynamic_method, args.dynamic_param, args.verbose)

    # update metadata
    if metadata is None:
        metadata = {}

    comment = metadata.get("ss_training_comment", "")

    if not args.dynamic_method:
        metadata["ss_training_comment"] = f"dimension is resized from {old_dim} to {args.new_rank}; {comment}"
        metadata["ss_network_dim"] = str(args.new_rank)
        metadata["ss_network_alpha"] = str(new_alpha)
    else:
        metadata["ss_training_comment"] = f"Dynamic resize with {args.dynamic_method}: {args.dynamic_param} from {old_dim}; {comment}"
        metadata["ss_network_dim"] = 'Dynamic'
        metadata["ss_network_alpha"] = 'Dynamic'

    model_hash, legacy_hash = train_util.precalculate_safetensors_hashes(state_dict, metadata)
    metadata["sshs_model_hash"] = model_hash
    metadata["sshs_legacy_hash"] = legacy_hash

    print(f"saving model to: {args.save_to}")
    save_to_file(args.save_to, state_dict, save_dtype, metadata)
def setup_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser()

    parser.add_argument("--save_precision", type=str, default=None,
                        choices=[None, "float", "fp16", "bf16"], help="precision for saving; float if omitted")
    parser.add_argument("--new_rank", type=int, default=4,
                        help="rank (dim) of the output LoRA")
    parser.add_argument("--save_to", type=str, default=None,
                        help="destination file name: ckpt or safetensors file")
    parser.add_argument("--model", type=str, default=None,
                        help="LoRA model to resize to the new rank: ckpt or safetensors file")
    parser.add_argument("--device", type=str, default=None,
                        help="device to use; cuda for GPU")
    parser.add_argument("--verbose", action="store_true",
                        help="display verbose resizing information")
    parser.add_argument("--dynamic_method", type=str, default=None, choices=[None, "sv_ratio", "sv_fro", "sv_cumulative"],
                        help="dynamic resizing method; --new_rank is used as a hard limit for the max rank")
    parser.add_argument("--dynamic_param", type=float, default=None,
                        help="target value for the chosen dynamic resizing method")

    return parser
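
# Example dynamic invocation (illustrative): retain ~90% of each block's
# Frobenius norm, with rank capped at 32:
#   python resize_lora.py --model in.safetensors --save_to out.safetensors \
#       --new_rank 32 --dynamic_method sv_fro --dynamic_param 0.9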
if __name__ == '__main__':
    parser = setup_parser()

    args = parser.parse_args()
    resize(args)