img2img.py

  1. """make variations of input image"""
import argparse, os
import PIL
import torch
import numpy as np
from omegaconf import OmegaConf
from PIL import Image
from tqdm import tqdm, trange
from itertools import islice
from einops import rearrange, repeat
from torchvision.utils import make_grid
from torch import autocast
from contextlib import nullcontext
from pytorch_lightning import seed_everything
from imwatermark import WatermarkEncoder

from scripts.txt2img import put_watermark
from ldm.util import instantiate_from_config
from ldm.models.diffusion.ddim import DDIMSampler
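
# Split an iterable into fixed-size tuples; used to batch prompts read via --from-file.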
def chunk(it, size):
    it = iter(it)
    return iter(lambda: tuple(islice(it, size)), ())
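
# Build the model from the OmegaConf config and load checkpoint weights non-strictly,
# reporting (rather than failing on) missing or unexpected state_dict keys.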
def load_model_from_config(config, ckpt, verbose=False):
    print(f"Loading model from {ckpt}")
    pl_sd = torch.load(ckpt, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    sd = pl_sd["state_dict"]
    model = instantiate_from_config(config.model)
    m, u = model.load_state_dict(sd, strict=False)
    if len(m) > 0 and verbose:
        print("missing keys:")
        print(m)
    if len(u) > 0 and verbose:
        print("unexpected keys:")
        print(u)

    model.cuda()
    model.eval()
    return model
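
# Load an RGB image, resize each side down to the nearest multiple of 64
# (required by the autoencoder's downsampling), and scale pixels to [-1, 1].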
def load_img(path):
    image = Image.open(path).convert("RGB")
    w, h = image.size
    print(f"loaded input image of size ({w}, {h}) from {path}")
    w, h = map(lambda x: x - x % 64, (w, h))  # resize to integer multiple of 64
    image = image.resize((w, h), resample=PIL.Image.LANCZOS)
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2. * image - 1.


def main():
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--prompt",
        type=str,
        nargs="?",
        default="a painting of a virus monster playing guitar",
        help="the prompt to render"
    )
    parser.add_argument(
        "--init-img",
        type=str,
        nargs="?",
        help="path to the input image"
    )
    parser.add_argument(
        "--outdir",
        type=str,
        nargs="?",
        help="dir to write results to",
        default="outputs/img2img-samples"
    )
    parser.add_argument(
        "--ddim_steps",
        type=int,
        default=50,
        help="number of ddim sampling steps",
    )
    parser.add_argument(
        "--fixed_code",
        action='store_true',
        help="if enabled, uses the same starting code across all samples",
    )
    parser.add_argument(
        "--ddim_eta",
        type=float,
        default=0.0,
        help="ddim eta (eta=0.0 corresponds to deterministic sampling)",
    )
    parser.add_argument(
        "--n_iter",
        type=int,
        default=1,
        help="sample this often",
    )
    parser.add_argument(
        "--C",
        type=int,
        default=4,
        help="latent channels",
    )
    parser.add_argument(
        "--f",
        type=int,
        default=8,
        help="downsampling factor, most often 8 or 16",
    )
    parser.add_argument(
        "--n_samples",
        type=int,
        default=2,
        help="how many samples to produce for each given prompt, a.k.a. batch size",
    )
    parser.add_argument(
        "--n_rows",
        type=int,
        default=0,
        help="rows in the grid (default: n_samples)",
    )
    parser.add_argument(
        "--scale",
        type=float,
        default=9.0,
        help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))",
    )
    parser.add_argument(
        "--strength",
        type=float,
        default=0.8,
        help="strength for noising/unnoising. 1.0 corresponds to full destruction of information in init image",
    )
    parser.add_argument(
        "--from-file",
        type=str,
        help="if specified, load prompts from this file",
    )
    parser.add_argument(
        "--config",
        type=str,
        default="configs/stable-diffusion/v2-inference.yaml",
        help="path to config which constructs model",
    )
    parser.add_argument(
        "--ckpt",
        type=str,
        help="path to checkpoint of model",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=42,
        help="the seed (for reproducible sampling)",
    )
    parser.add_argument(
        "--precision",
        type=str,
        help="evaluate at this precision",
        choices=["full", "autocast"],
        default="autocast"
    )

    opt = parser.parse_args()
    seed_everything(opt.seed)

    config = OmegaConf.load(f"{opt.config}")
    model = load_model_from_config(config, f"{opt.ckpt}")

    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model = model.to(device)

    sampler = DDIMSampler(model)

    os.makedirs(opt.outdir, exist_ok=True)
    outpath = opt.outdir

    print("Creating invisible watermark encoder (see https://github.com/ShieldMnt/invisible-watermark)...")
    wm = "SDV2"
    wm_encoder = WatermarkEncoder()
    wm_encoder.set_watermark('bytes', wm.encode('utf-8'))

    batch_size = opt.n_samples
    n_rows = opt.n_rows if opt.n_rows > 0 else batch_size
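
    # Build the batched prompt list: n_samples copies of --prompt, or batches of
    # size n_samples read from --from-file.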
    if not opt.from_file:
        prompt = opt.prompt
        assert prompt is not None
        data = [batch_size * [prompt]]
    else:
        print(f"reading prompts from {opt.from_file}")
        with open(opt.from_file, "r") as f:
            data = f.read().splitlines()
            data = list(chunk(data, batch_size))

    sample_path = os.path.join(outpath, "samples")
    os.makedirs(sample_path, exist_ok=True)
    base_count = len(os.listdir(sample_path))
    grid_count = len(os.listdir(outpath)) - 1

    assert os.path.isfile(opt.init_img)
    init_image = load_img(opt.init_img).to(device)
    init_image = repeat(init_image, '1 ... -> b ...', b=batch_size)
    init_latent = model.get_first_stage_encoding(model.encode_first_stage(init_image))  # move to latent space

    sampler.make_schedule(ddim_num_steps=opt.ddim_steps, ddim_eta=opt.ddim_eta, verbose=False)
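
    # strength picks how far along the DDIM schedule the init latent is re-noised:
    # t_enc of the opt.ddim_steps steps are applied, so 1.0 wipes the init image entirely.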
    assert 0. <= opt.strength <= 1., 'can only work with strength in [0.0, 1.0]'
    t_enc = int(opt.strength * opt.ddim_steps)
    print(f"target t_enc is {t_enc} steps")

    precision_scope = autocast if opt.precision == "autocast" else nullcontext
    with torch.no_grad():
        with precision_scope("cuda"):
            with model.ema_scope():
                all_samples = list()
                for n in trange(opt.n_iter, desc="Sampling"):
                    for prompts in tqdm(data, desc="data"):
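                        # for classifier-free guidance (scale != 1), also embed the empty
                        # prompt as the unconditional conditioning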
                        uc = None
                        if opt.scale != 1.0:
                            uc = model.get_learned_conditioning(batch_size * [""])
                        if isinstance(prompts, tuple):
                            prompts = list(prompts)
                        c = model.get_learned_conditioning(prompts)

                        # encode: noise the init latent forward to timestep t_enc (stochastic DDIM encoding)
                        z_enc = sampler.stochastic_encode(init_latent, torch.tensor([t_enc] * batch_size).to(device))
                        # decode: denoise back from t_enc under the prompt conditioning
                        samples = sampler.decode(z_enc, c, t_enc, unconditional_guidance_scale=opt.scale,
                                                 unconditional_conditioning=uc, )
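
                        # decode latents to pixel space with the first-stage VAE, then rescale [-1, 1] -> [0, 1]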
                        x_samples = model.decode_first_stage(samples)
                        x_samples = torch.clamp((x_samples + 1.0) / 2.0, min=0.0, max=1.0)

                        for x_sample in x_samples:
                            x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
                            img = Image.fromarray(x_sample.astype(np.uint8))
                            img = put_watermark(img, wm_encoder)
                            img.save(os.path.join(sample_path, f"{base_count:05}.png"))
                            base_count += 1
                        all_samples.append(x_samples)

                # additionally, save as grid
                grid = torch.stack(all_samples, 0)
                grid = rearrange(grid, 'n b c h w -> (n b) c h w')
                grid = make_grid(grid, nrow=n_rows)

                # to image
                grid = 255. * rearrange(grid, 'c h w -> h w c').cpu().numpy()
                grid = Image.fromarray(grid.astype(np.uint8))
                grid = put_watermark(grid, wm_encoder)
                grid.save(os.path.join(outpath, f'grid-{grid_count:04}.png'))
                grid_count += 1

    print(f"Your samples are ready and waiting for you here: \n{outpath} \nEnjoy.")


if __name__ == "__main__":
    main()