# util.py
import importlib

import torch
from torch import optim
import numpy as np

from inspect import isfunction
from PIL import Image, ImageDraw, ImageFont


def autocast(f):
    # Wrap f so that it always runs under CUDA autocast, reusing the globally
    # configured autocast dtype and cache settings.
    def do_autocast(*args, **kwargs):
        with torch.cuda.amp.autocast(enabled=True,
                                     dtype=torch.get_autocast_gpu_dtype(),
                                     cache_enabled=torch.is_autocast_cache_enabled()):
            return f(*args, **kwargs)

    return do_autocast
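

# Illustrative sketch (not part of the original module): how the `autocast`
# decorator above might be applied. `_demo_autocast` and `heavy_matmul` are
# hypothetical names, and running this requires a CUDA device, since the
# wrapper uses torch.cuda.amp.autocast.
def _demo_autocast():
    @autocast
    def heavy_matmul(a, b):
        return a @ b

    a = torch.randn(64, 64, device="cuda")
    b = torch.randn(64, 64, device="cuda")
    return heavy_matmul(a, b)  # runs under the globally configured autocast dtype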


def log_txt_as_img(wh, xc, size=10):
    # wh: a tuple of (width, height)
    # xc: a list of captions to plot
    b = len(xc)
    txts = list()
    for bi in range(b):
        txt = Image.new("RGB", wh, color="white")
        draw = ImageDraw.Draw(txt)
        font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)
        nc = int(40 * (wh[0] / 256))
        lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))

        try:
            draw.text((0, 0), lines, fill="black", font=font)
        except UnicodeEncodeError:
            print("Can't encode string for logging. Skipping.")

        txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
        txts.append(txt)
    txts = np.stack(txts)
    txts = torch.tensor(txts)
    return txts
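

# Illustrative sketch (not part of the original module): rendering a batch of
# captions into an image tensor with log_txt_as_img. `_demo_log_txt_as_img` is
# a hypothetical name, and the call assumes the font file 'data/DejaVuSans.ttf'
# referenced above exists relative to the working directory.
def _demo_log_txt_as_img():
    captions = ["a photograph of an astronaut riding a horse", "a red cube"]
    imgs = log_txt_as_img((256, 256), captions, size=10)
    # imgs has shape (2, 3, 256, 256) with values in [-1, 1]
    return imgs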


def ismap(x):
    if not isinstance(x, torch.Tensor):
        return False
    return (len(x.shape) == 4) and (x.shape[1] > 3)


def isimage(x):
    if not isinstance(x, torch.Tensor):
        return False
    return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)
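

# Illustrative sketch (not part of the original module): the shape conventions
# that ismap/isimage check for. `_demo_shape_checks` is a hypothetical name.
def _demo_shape_checks():
    img = torch.zeros(4, 3, 64, 64)   # batch of RGB images
    fmap = torch.zeros(4, 16, 8, 8)   # batch of feature maps (>3 channels)
    assert isimage(img) and not ismap(img)
    assert ismap(fmap) and not isimage(fmap)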


def exists(x):
    return x is not None


def default(val, d):
    if exists(val):
        return val
    return d() if isfunction(d) else d
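

# Illustrative sketch (not part of the original module): `default` returns the
# given value when it is not None, otherwise the fallback, calling the fallback
# first if it is a function. `_demo_default` is a hypothetical name.
def _demo_default():
    assert default(3, 5) == 3
    assert default(None, 5) == 5
    assert default(None, lambda: list()) == []  # callable fallbacks are invoked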


def mean_flat(tensor):
    """
    https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86
    Take the mean over all non-batch dimensions.
    """
    return tensor.mean(dim=list(range(1, len(tensor.shape))))
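

# Illustrative sketch (not part of the original module): mean_flat reduces every
# dimension except the batch dimension. `_demo_mean_flat` is a hypothetical name.
def _demo_mean_flat():
    x = torch.arange(24, dtype=torch.float32).reshape(2, 3, 4)
    per_sample = mean_flat(x)  # one mean per batch element, shape (2,)
    assert per_sample.shape == (2,)
    return per_sample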


def count_params(model, verbose=False):
    total_params = sum(p.numel() for p in model.parameters())
    if verbose:
        print(f"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.")
    return total_params
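

# Illustrative sketch (not part of the original module): counting the parameters
# of a small torch module. `_demo_count_params` is a hypothetical name.
def _demo_count_params():
    import torch.nn as nn
    net = nn.Linear(16, 4)  # 16*4 weights + 4 biases = 68 parameters
    return count_params(net, verbose=True)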


def instantiate_from_config(config):
    if "target" not in config:
        if config == '__is_first_stage__':
            return None
        elif config == "__is_unconditional__":
            return None
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", dict()))


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
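

# Illustrative sketch (not part of the original module): instantiate_from_config
# expects a dict with a dotted "target" import path and optional "params" kwargs.
# `_demo_instantiate` is a hypothetical name; torch.nn.Linear is used only as a
# convenient, importable target.
def _demo_instantiate():
    config = {"target": "torch.nn.Linear",
              "params": {"in_features": 16, "out_features": 4}}
    layer = instantiate_from_config(config)  # equivalent to torch.nn.Linear(16, 4)
    return layer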


class AdamWwithEMAandWings(optim.Optimizer):
    # credit to https://gist.github.com/crowsonkb/65f7265353f403714fce3b2595e0b298
    def __init__(self, params, lr=1.e-3, betas=(0.9, 0.999), eps=1.e-8,  # TODO: check hyperparameters before using
                 weight_decay=1.e-2, amsgrad=False, ema_decay=0.9999,  # ema decay to match previous code
                 ema_power=1., param_names=()):
        """AdamW that saves EMA versions of the parameters."""
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        if not 0.0 <= ema_decay <= 1.0:
            raise ValueError("Invalid ema_decay value: {}".format(ema_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad, ema_decay=ema_decay,
                        ema_power=ema_power, param_names=param_names)
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.
        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            ema_params_with_grad = []
            state_sums = []
            max_exp_avg_sqs = []
            state_steps = []
            amsgrad = group['amsgrad']
            beta1, beta2 = group['betas']
            ema_decay = group['ema_decay']
            ema_power = group['ema_power']

            for p in group['params']:
                if p.grad is None:
                    continue
                params_with_grad.append(p)
                if p.grad.is_sparse:
                    raise RuntimeError('AdamW does not support sparse gradients')
                grads.append(p.grad)

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponential moving average of parameter values
                    state['param_exp_avg'] = p.detach().float().clone()

                exp_avgs.append(state['exp_avg'])
                exp_avg_sqs.append(state['exp_avg_sq'])
                ema_params_with_grad.append(state['param_exp_avg'])

                if amsgrad:
                    max_exp_avg_sqs.append(state['max_exp_avg_sq'])

                # update the steps for each param group update
                state['step'] += 1
                # record the step after step update
                state_steps.append(state['step'])

            optim._functional.adamw(params_with_grad,
                                    grads,
                                    exp_avgs,
                                    exp_avg_sqs,
                                    max_exp_avg_sqs,
                                    state_steps,
                                    amsgrad=amsgrad,
                                    beta1=beta1,
                                    beta2=beta2,
                                    lr=group['lr'],
                                    weight_decay=group['weight_decay'],
                                    eps=group['eps'],
                                    maximize=False)

            cur_ema_decay = min(ema_decay, 1 - state['step'] ** -ema_power)
            for param, ema_param in zip(params_with_grad, ema_params_with_grad):
                ema_param.mul_(cur_ema_decay).add_(param.float(), alpha=1 - cur_ema_decay)

        return loss
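

# Illustrative sketch (not part of the original module): driving the optimizer
# above on a tiny model. The EMA copies live in the optimizer state under
# 'param_exp_avg' for each parameter. `_demo_ema_adamw` is a hypothetical name;
# note that step() delegates to the private optim._functional.adamw API, so this
# sketch assumes a torch version compatible with that call.
def _demo_ema_adamw():
    import torch.nn as nn
    model = nn.Linear(8, 1)
    opt = AdamWwithEMAandWings(model.parameters(), lr=1.e-3, ema_decay=0.9999)
    for _ in range(3):
        loss = model(torch.randn(4, 8)).pow(2).mean()
        opt.zero_grad()
        loss.backward()
        opt.step()
    # EMA-smoothed copies of the parameters, kept in float32
    ema_weights = [opt.state[p]['param_exp_avg'] for p in model.parameters()]
    return ema_weights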