import json
import math
import os
import sys
import warnings

import torch
import numpy as np
from PIL import Image, ImageFilter, ImageOps
import random
import cv2
from skimage import exposure
from typing import Any, Dict, List, Optional

import modules.sd_hijack
from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx, scripts
from modules.sd_hijack import model_hijack
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.paths as paths
import modules.face_restoration
import modules.images as images
import modules.styles
import modules.sd_models as sd_models
import modules.sd_vae as sd_vae
import logging
from ldm.data.util import AddMiDaS
from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion

from einops import repeat, rearrange
from blendmodes.blend import blendLayers, BlendType

# some of those options should not be changed at all because they would break the model, so I removed them from options.
opt_C = 4
opt_f = 8
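# opt_C is the number of channels in the latent space and opt_f is the spatial
# downsampling factor of the first-stage VAE: a 512x512 RGB image becomes a
# 4x64x64 latent. Changing either would break every compatible checkpoint.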


def setup_color_correction(image):
    logging.info("Calibrating color correction.")
    correction_target = cv2.cvtColor(np.asarray(image.copy()), cv2.COLOR_RGB2LAB)
    return correction_target


def apply_color_correction(correction, original_image):
    logging.info("Applying color correction.")
    image = Image.fromarray(cv2.cvtColor(exposure.match_histograms(
        cv2.cvtColor(
            np.asarray(original_image),
            cv2.COLOR_RGB2LAB
        ),
        correction,
        channel_axis=2
    ), cv2.COLOR_LAB2RGB).astype("uint8"))

    image = blendLayers(image, original_image, BlendType.LUMINOSITY)

    return image
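

# A minimal usage sketch (`reference` and `generated` are hypothetical PIL RGB
# images; the names are illustrative):
#
#     correction = setup_color_correction(reference)
#     corrected = apply_color_correction(correction, generated)
#
# Histogram matching is done in LAB space, and the LUMINOSITY blend afterwards
# (roughly) keeps the original image's luminance while taking color from the
# histogram-matched result, so correction mostly shifts color, not fine detail.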


def apply_overlay(image, paste_loc, index, overlays):
    if overlays is None or index >= len(overlays):
        return image

    overlay = overlays[index]

    if paste_loc is not None:
        x, y, w, h = paste_loc
        base_image = Image.new('RGBA', (overlay.width, overlay.height))
        image = images.resize_image(1, image, w, h)
        base_image.paste(image, (x, y))
        image = base_image

    image = image.convert('RGBA')
    image.alpha_composite(overlay)
    image = image.convert('RGB')

    return image
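# paste_loc, when set, is the (x, y, width, height) rectangle that a full-res
# inpaint crop is pasted back into; when it is None the overlay is simply
# alpha-composited over the whole image.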


def txt2img_image_conditioning(sd_model, x, width, height):
    if sd_model.model.conditioning_key in {'hybrid', 'concat'}:  # Inpainting models

        # The "masked-image" in this case will just be all zeros since the entire image is masked.
        image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device)
        image_conditioning = sd_model.get_first_stage_encoding(sd_model.encode_first_stage(image_conditioning))

        # Add the fake full 1s mask to the first dimension.
        image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
        image_conditioning = image_conditioning.to(x.dtype)

        return image_conditioning

    elif sd_model.model.conditioning_key == "crossattn-adm":  # UnCLIP models

        return x.new_zeros(x.shape[0], 2*sd_model.noise_augmentor.time_embed.dim, dtype=x.dtype, device=x.device)

    else:
        # Dummy zero conditioning if we're not using inpainting or unclip models.
        # Still takes up a bit of memory, but no encoder call.
        # Pretty sure we can just make this a 1x1 image since it's not going to be used besides its batch size.
        return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device)
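

# The three branches above cover the three conditioning styles found in SD
# checkpoints: concat-style inpainting models, crossattn-adm UnCLIP models, and
# plain crossattn models, for which the zero tensor is a placeholder that only
# carries the batch size.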


class StableDiffusionProcessing:
    """
    The first set of parameters: sd_model -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing
    """
    def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None):
        if sampler_index is not None:
            print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)

        self.outpath_samples: str = outpath_samples
        self.outpath_grids: str = outpath_grids
        self.prompt: str = prompt
        self.prompt_for_display: str = None
        self.negative_prompt: str = (negative_prompt or "")
        self.styles: list = styles or []
        self.seed: int = seed
        self.subseed: int = subseed
        self.subseed_strength: float = subseed_strength
        self.seed_resize_from_h: int = seed_resize_from_h
        self.seed_resize_from_w: int = seed_resize_from_w
        self.sampler_name: str = sampler_name
        self.batch_size: int = batch_size
        self.n_iter: int = n_iter
        self.steps: int = steps
        self.cfg_scale: float = cfg_scale
        self.width: int = width
        self.height: int = height
        self.restore_faces: bool = restore_faces
        self.tiling: bool = tiling
        self.do_not_save_samples: bool = do_not_save_samples
        self.do_not_save_grid: bool = do_not_save_grid
        self.extra_generation_params: dict = extra_generation_params or {}
        self.overlay_images = overlay_images
        self.eta = eta
        self.do_not_reload_embeddings = do_not_reload_embeddings
        self.paste_to = None
        self.color_corrections = None
        self.denoising_strength: float = denoising_strength
        self.sampler_noise_scheduler_override = None
        self.ddim_discretize = ddim_discretize or opts.ddim_discretize
        self.s_churn = s_churn or opts.s_churn
        self.s_tmin = s_tmin or opts.s_tmin
        self.s_tmax = s_tmax or float('inf')  # not representable as a standard ui option
        self.s_noise = s_noise or opts.s_noise
        self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts}
        self.override_settings_restore_afterwards = override_settings_restore_afterwards
        self.is_using_inpainting_conditioning = False
        self.disable_extra_networks = False

        if not seed_enable_extras:
            self.subseed = -1
            self.subseed_strength = 0
            self.seed_resize_from_h = 0
            self.seed_resize_from_w = 0

        self.scripts = None
        self.script_args = script_args
        self.all_prompts = None
        self.all_negative_prompts = None
        self.all_seeds = None
        self.all_subseeds = None
        self.iteration = 0

    @property
    def sd_model(self):
        return shared.sd_model

    def txt2img_image_conditioning(self, x, width=None, height=None):
        self.is_using_inpainting_conditioning = self.sd_model.model.conditioning_key in {'hybrid', 'concat'}

        return txt2img_image_conditioning(self.sd_model, x, width or self.width, height or self.height)

    def depth2img_image_conditioning(self, source_image):
        # Use the AddMiDaS helper to format our source image to suit the MiDaS model
        transformer = AddMiDaS(model_type="dpt_hybrid")
        transformed = transformer({"jpg": rearrange(source_image[0], "c h w -> h w c")})
        midas_in = torch.from_numpy(transformed["midas_in"][None, ...]).to(device=shared.device)
        midas_in = repeat(midas_in, "1 ... -> n ...", n=self.batch_size)

        conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(source_image))
        conditioning = torch.nn.functional.interpolate(
            self.sd_model.depth_model(midas_in),
            size=conditioning_image.shape[2:],
            mode="bicubic",
            align_corners=False,
        )

        (depth_min, depth_max) = torch.aminmax(conditioning)
        conditioning = 2. * (conditioning - depth_min) / (depth_max - depth_min) - 1.
        return conditioning

    def edit_image_conditioning(self, source_image):
        conditioning_image = self.sd_model.encode_first_stage(source_image).mode()

        return conditioning_image

    def unclip_image_conditioning(self, source_image):
        c_adm = self.sd_model.embedder(source_image)
        if self.sd_model.noise_augmentor is not None:
            noise_level = 0  # TODO: Allow other noise levels?
            c_adm, noise_level_emb = self.sd_model.noise_augmentor(c_adm, noise_level=repeat(torch.tensor([noise_level]).to(c_adm.device), '1 -> b', b=c_adm.shape[0]))
            c_adm = torch.cat((c_adm, noise_level_emb), 1)
        return c_adm

    def inpainting_image_conditioning(self, source_image, latent_image, image_mask=None):
        self.is_using_inpainting_conditioning = True

        # Handle the different mask inputs
        if image_mask is not None:
            if torch.is_tensor(image_mask):
                conditioning_mask = image_mask
            else:
                conditioning_mask = np.array(image_mask.convert("L"))
                conditioning_mask = conditioning_mask.astype(np.float32) / 255.0
                conditioning_mask = torch.from_numpy(conditioning_mask[None, None])

                # Inpainting model uses a discretized mask as input, so we round to either 1.0 or 0.0
                conditioning_mask = torch.round(conditioning_mask)
        else:
            conditioning_mask = source_image.new_ones(1, 1, *source_image.shape[-2:])

        # Create another latent image, this time with a masked version of the original input.
        # Smoothly interpolate between the masked and unmasked latent conditioning image using a parameter.
        conditioning_mask = conditioning_mask.to(device=source_image.device, dtype=source_image.dtype)
        conditioning_image = torch.lerp(
            source_image,
            source_image * (1.0 - conditioning_mask),
            getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight)
        )

        # Encode the new masked image using first stage of network.
        conditioning_image = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(conditioning_image))

        # Create the concatenated conditioning tensor to be fed to `c_concat`
        conditioning_mask = torch.nn.functional.interpolate(conditioning_mask, size=latent_image.shape[-2:])
        conditioning_mask = conditioning_mask.expand(conditioning_image.shape[0], -1, -1, -1)
        image_conditioning = torch.cat([conditioning_mask, conditioning_image], dim=1)
        image_conditioning = image_conditioning.to(shared.device).type(self.sd_model.dtype)

        return image_conditioning
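
    # The tensor returned above has 5 channels: 1 for the downsampled binary
    # mask plus 4 for the masked-image latent, matching the (batch, 5, 1, 1)
    # dummy conditioning used when no inpainting model is loaded.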

    def img2img_image_conditioning(self, source_image, latent_image, image_mask=None):
        source_image = devices.cond_cast_float(source_image)

        # HACK: Using introspection as the Depth2Image model doesn't appear to uniquely
        # identify itself with a field common to all models. The conditioning_key is also hybrid.
        if isinstance(self.sd_model, LatentDepth2ImageDiffusion):
            return self.depth2img_image_conditioning(source_image)

        if self.sd_model.cond_stage_key == "edit":
            return self.edit_image_conditioning(source_image)

        if self.sampler.conditioning_key in {'hybrid', 'concat'}:
            return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)

        if self.sampler.conditioning_key == "crossattn-adm":
            return self.unclip_image_conditioning(source_image)

        # Dummy zero conditioning if we're not using inpainting or depth model.
        return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)

    def init(self, all_prompts, all_seeds, all_subseeds):
        pass

    def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
        raise NotImplementedError()

    def close(self):
        self.sampler = None


class Processed:
    def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None, comments=""):
        self.images = images_list
        self.prompt = p.prompt
        self.negative_prompt = p.negative_prompt
        self.seed = seed
        self.subseed = subseed
        self.subseed_strength = p.subseed_strength
        self.info = info
        self.comments = comments
        self.width = p.width
        self.height = p.height
        self.sampler_name = p.sampler_name
        self.cfg_scale = p.cfg_scale
        self.image_cfg_scale = getattr(p, 'image_cfg_scale', None)
        self.steps = p.steps
        self.batch_size = p.batch_size
        self.restore_faces = p.restore_faces
        self.face_restoration_model = opts.face_restoration_model if p.restore_faces else None
        self.sd_model_hash = shared.sd_model.sd_model_hash
        self.seed_resize_from_w = p.seed_resize_from_w
        self.seed_resize_from_h = p.seed_resize_from_h
        self.denoising_strength = getattr(p, 'denoising_strength', None)
        self.extra_generation_params = p.extra_generation_params
        self.index_of_first_image = index_of_first_image
        self.styles = p.styles
        self.job_timestamp = state.job_timestamp
        self.clip_skip = opts.CLIP_stop_at_last_layers
        self.eta = p.eta
        self.ddim_discretize = p.ddim_discretize
        self.s_churn = p.s_churn
        self.s_tmin = p.s_tmin
        self.s_tmax = p.s_tmax
        self.s_noise = p.s_noise
        self.sampler_noise_scheduler_override = p.sampler_noise_scheduler_override
        self.prompt = self.prompt if type(self.prompt) != list else self.prompt[0]
        self.negative_prompt = self.negative_prompt if type(self.negative_prompt) != list else self.negative_prompt[0]
        self.seed = int(self.seed if type(self.seed) != list else self.seed[0]) if self.seed is not None else -1
        self.subseed = int(self.subseed if type(self.subseed) != list else self.subseed[0]) if self.subseed is not None else -1
        self.is_using_inpainting_conditioning = p.is_using_inpainting_conditioning

        self.all_prompts = all_prompts or p.all_prompts or [self.prompt]
        self.all_negative_prompts = all_negative_prompts or p.all_negative_prompts or [self.negative_prompt]
        self.all_seeds = all_seeds or p.all_seeds or [self.seed]
        self.all_subseeds = all_subseeds or p.all_subseeds or [self.subseed]
        self.infotexts = infotexts or [info]

    def js(self):
        obj = {
            "prompt": self.all_prompts[0],
            "all_prompts": self.all_prompts,
            "negative_prompt": self.all_negative_prompts[0],
            "all_negative_prompts": self.all_negative_prompts,
            "seed": self.seed,
            "all_seeds": self.all_seeds,
            "subseed": self.subseed,
            "all_subseeds": self.all_subseeds,
            "subseed_strength": self.subseed_strength,
            "width": self.width,
            "height": self.height,
            "sampler_name": self.sampler_name,
            "cfg_scale": self.cfg_scale,
            "steps": self.steps,
            "batch_size": self.batch_size,
            "restore_faces": self.restore_faces,
            "face_restoration_model": self.face_restoration_model,
            "sd_model_hash": self.sd_model_hash,
            "seed_resize_from_w": self.seed_resize_from_w,
            "seed_resize_from_h": self.seed_resize_from_h,
            "denoising_strength": self.denoising_strength,
            "extra_generation_params": self.extra_generation_params,
            "index_of_first_image": self.index_of_first_image,
            "infotexts": self.infotexts,
            "styles": self.styles,
            "job_timestamp": self.job_timestamp,
            "clip_skip": self.clip_skip,
            "is_using_inpainting_conditioning": self.is_using_inpainting_conditioning,
        }

        return json.dumps(obj)

    def infotext(self, p: StableDiffusionProcessing, index):
        return create_infotext(p, self.all_prompts, self.all_seeds, self.all_subseeds, comments=[], position_in_batch=index % self.batch_size, iteration=index // self.batch_size)


# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3
def slerp(val, low, high):
    low_norm = low/torch.norm(low, dim=1, keepdim=True)
    high_norm = high/torch.norm(high, dim=1, keepdim=True)
    dot = (low_norm*high_norm).sum(1)

    if dot.mean() > 0.9995:
        # vectors are nearly parallel: fall back to linear interpolation,
        # oriented so that val=0 returns low, matching the spherical path below
        return low * (1 - val) + high * val

    omega = torch.acos(dot)
    so = torch.sin(omega)
    res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high
    return res
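

# A quick sketch of how slerp is used for variation seeds below (shapes and
# names are illustrative):
#
#     noise = devices.randn(seed, noise_shape)
#     subnoise = devices.randn(subseed, noise_shape)
#     mixed = slerp(0.3, noise, subnoise)  # 0.0 -> noise, 1.0 -> subnoise
#
# Interpolating on the sphere keeps the result's magnitude close to that of a
# Gaussian sample, which a plain linear mix would not.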


def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None):
    eta_noise_seed_delta = opts.eta_noise_seed_delta or 0
    xs = []

    # if we have multiple seeds, this means we are working with batch size>1; this then
    # enables the generation of additional tensors with noise that the sampler will use during its processing.
    # Using those pre-generated tensors instead of simple torch.randn allows a batch with seeds [100, 101] to
    # produce the same images as with two batches [100], [101].
    if p is not None and p.sampler is not None and (len(seeds) > 1 and opts.enable_batch_seeds or eta_noise_seed_delta > 0):
        sampler_noises = [[] for _ in range(p.sampler.number_of_needed_noises(p))]
    else:
        sampler_noises = None

    for i, seed in enumerate(seeds):
        noise_shape = shape if seed_resize_from_h <= 0 or seed_resize_from_w <= 0 else (shape[0], seed_resize_from_h//8, seed_resize_from_w//8)

        subnoise = None
        if subseeds is not None:
            subseed = 0 if i >= len(subseeds) else subseeds[i]
            subnoise = devices.randn(subseed, noise_shape)

        # randn results depend on device; gpu and cpu get different results for same seed;
        # the way I see it, it's better to do this on CPU, so that everyone gets same result;
        # but the original script had it like this, so I do not dare change it for now because
        # it will break everyone's seeds.
        noise = devices.randn(seed, noise_shape)

        if subnoise is not None:
            noise = slerp(subseed_strength, noise, subnoise)

        if noise_shape != shape:
            x = devices.randn(seed, shape)
            dx = (shape[2] - noise_shape[2]) // 2
            dy = (shape[1] - noise_shape[1]) // 2
            w = noise_shape[2] if dx >= 0 else noise_shape[2] + 2 * dx
            h = noise_shape[1] if dy >= 0 else noise_shape[1] + 2 * dy
            tx = 0 if dx < 0 else dx
            ty = 0 if dy < 0 else dy
            dx = max(-dx, 0)
            dy = max(-dy, 0)

            x[:, ty:ty+h, tx:tx+w] = noise[:, dy:dy+h, dx:dx+w]
            noise = x

        if sampler_noises is not None:
            cnt = p.sampler.number_of_needed_noises(p)

            if eta_noise_seed_delta > 0:
                torch.manual_seed(seed + eta_noise_seed_delta)

            for j in range(cnt):
                sampler_noises[j].append(devices.randn_without_seed(tuple(noise_shape)))

        xs.append(noise)

    if sampler_noises is not None:
        p.sampler.sampler_noises = [torch.stack(n).to(shared.device) for n in sampler_noises]

    x = torch.stack(xs).to(shared.device)
    return x
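

# When seed_resize_from_w/h are set, noise is generated at the old resolution
# and pasted into the center of a tensor of the new shape, so enlarging the
# canvas keeps the middle of the image similar for the same seed.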


def decode_first_stage(model, x):
    with devices.autocast(disable=x.dtype == devices.dtype_vae):
        x = model.decode_first_stage(x)

    return x


def get_fixed_seed(seed):
    if seed is None or seed == '' or seed == -1:
        return int(random.randrange(4294967294))

    return seed


def fix_seed(p):
    p.seed = get_fixed_seed(p.seed)
    p.subseed = get_fixed_seed(p.subseed)


def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0):
    index = position_in_batch + iteration * p.batch_size

    clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers)

    generation_params = {
        "Steps": p.steps,
        "Sampler": p.sampler_name,
        "CFG scale": p.cfg_scale,
        "Image CFG scale": getattr(p, 'image_cfg_scale', None),
        "Seed": all_seeds[index],
        "Face restoration": (opts.face_restoration_model if p.restore_faces else None),
        "Size": f"{p.width}x{p.height}",
        "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
        "Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')),
        "Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
        "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
        "Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
        "Denoising strength": getattr(p, 'denoising_strength', None),
        "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None,
        "Clip skip": None if clip_skip <= 1 else clip_skip,
        "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta,
    }

    generation_params.update(p.extra_generation_params)

    generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None])

    negative_prompt_text = "\nNegative prompt: " + p.all_negative_prompts[index] if p.all_negative_prompts[index] else ""

    return f"{all_prompts[index]}{negative_prompt_text}\n{generation_params_text}".strip()
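

# The returned infotext is the plain-text block that gets embedded in PNG
# metadata, e.g. (values illustrative):
#
#     a photo of a cat
#     Negative prompt: blurry
#     Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 100, Size: 512x512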


def process_images(p: StableDiffusionProcessing) -> Processed:
    stored_opts = {k: opts.data[k] for k in p.override_settings.keys()}

    try:
        for k, v in p.override_settings.items():
            setattr(opts, k, v)

            if k == 'sd_model_checkpoint':
                sd_models.reload_model_weights()

            if k == 'sd_vae':
                sd_vae.reload_vae_weights()

        res = process_images_inner(p)

    finally:
        # restore opts to original state
        if p.override_settings_restore_afterwards:
            for k, v in stored_opts.items():
                setattr(opts, k, v)

                if k == 'sd_model_checkpoint':
                    sd_models.reload_model_weights()

                if k == 'sd_vae':
                    sd_vae.reload_vae_weights()

    return res
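

# Example: overriding settings for a single generation; with
# override_settings_restore_afterwards left at its default of True, they are
# restored when the call finishes (the checkpoint title is illustrative):
#
#     p.override_settings = {"sd_model_checkpoint": "v1-5-pruned-emaonly", "CLIP_stop_at_last_layers": 2}
#     processed = process_images(p)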


def process_images_inner(p: StableDiffusionProcessing) -> Processed:
    """this is the main loop that both txt2img and img2img use; it calls func_init once inside all the scopes and func_sample once per batch"""

    if type(p.prompt) == list:
        assert(len(p.prompt) > 0)
    else:
        assert p.prompt is not None

    devices.torch_gc()

    seed = get_fixed_seed(p.seed)
    subseed = get_fixed_seed(p.subseed)

    modules.sd_hijack.model_hijack.apply_circular(p.tiling)
    modules.sd_hijack.model_hijack.clear_comments()

    comments = {}

    if type(p.prompt) == list:
        p.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, p.styles) for x in p.prompt]
    else:
        p.all_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_styles_to_prompt(p.prompt, p.styles)]

    if type(p.negative_prompt) == list:
        p.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, p.styles) for x in p.negative_prompt]
    else:
        p.all_negative_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_negative_styles_to_prompt(p.negative_prompt, p.styles)]

    if type(seed) == list:
        p.all_seeds = seed
    else:
        p.all_seeds = [int(seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(p.all_prompts))]

    if type(subseed) == list:
        p.all_subseeds = subseed
    else:
        p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]

    def infotext(iteration=0, position_in_batch=0):
        return create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, comments, iteration, position_in_batch)

    if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
        model_hijack.embedding_db.load_textual_inversion_embeddings()

    if p.scripts is not None:
        p.scripts.process(p)

    infotexts = []
    output_images = []

    cached_uc = [None, None]
    cached_c = [None, None]

    def get_conds_with_caching(function, required_prompts, steps, cache):
        """
        Returns the result of calling function(shared.sd_model, required_prompts, steps)
        using a cache to store the result if the same arguments have been used before.

        cache is an array containing two elements. The first element is a tuple
        representing the previously used arguments, or None if no arguments
        have been used before. The second element is where the previously
        computed result is stored.
        """

        if cache[0] is not None and (required_prompts, steps) == cache[0]:
            return cache[1]

        with devices.autocast():
            cache[1] = function(shared.sd_model, required_prompts, steps)

        cache[0] = (required_prompts, steps)
        return cache[1]

    with torch.no_grad(), p.sd_model.ema_scope():
        with devices.autocast():
            p.init(p.all_prompts, p.all_seeds, p.all_subseeds)

            # for OSX, loading the model during sampling changes the generated picture, so it is loaded here
            if shared.opts.live_previews_enable and opts.show_progress_type == "Approx NN":
                sd_vae_approx.model()

        if state.job_count == -1:
            state.job_count = p.n_iter

        extra_network_data = None
        for n in range(p.n_iter):
            p.iteration = n

            if state.skipped:
                state.skipped = False

            if state.interrupted:
                break

            prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
            negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]
            seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
            subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]

            if p.scripts is not None:
                p.scripts.before_process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds)

            if len(prompts) == 0:
                break

            prompts, extra_network_data = extra_networks.parse_prompts(prompts)

            if not p.disable_extra_networks:
                with devices.autocast():
                    extra_networks.activate(p, extra_network_data)

            if p.scripts is not None:
                p.scripts.process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds)

            # params.txt should be saved after scripts.process_batch, since the
            # infotext could be modified by that callback
            # Example: a wildcard processed by process_batch sets an extra model
            # strength, which is saved as "Model Strength: 1.0" in the infotext
            if n == 0:
                with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:
                    processed = Processed(p, [], p.seed, "")
                    file.write(processed.infotext(p, 0))

            uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps, cached_uc)
            c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps, cached_c)

            if len(model_hijack.comments) > 0:
                for comment in model_hijack.comments:
                    comments[comment] = 1

            if p.n_iter > 1:
                shared.state.job = f"Batch {n+1} out of {p.n_iter}"

            with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast():
                samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts)

            x_samples_ddim = [decode_first_stage(p.sd_model, samples_ddim[i:i+1].to(dtype=devices.dtype_vae))[0].cpu() for i in range(samples_ddim.size(0))]
            for x in x_samples_ddim:
                devices.test_for_nans(x, "vae")

            x_samples_ddim = torch.stack(x_samples_ddim).float()
            x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)

            del samples_ddim

            if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
                lowvram.send_everything_to_cpu()

            devices.torch_gc()

            if p.scripts is not None:
                p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n)

            for i, x_sample in enumerate(x_samples_ddim):
                x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
                x_sample = x_sample.astype(np.uint8)

                if p.restore_faces:
                    if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
                        images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration")

                    devices.torch_gc()

                    x_sample = modules.face_restoration.restore_faces(x_sample)
                    devices.torch_gc()

                image = Image.fromarray(x_sample)

                if p.scripts is not None:
                    pp = scripts.PostprocessImageArgs(image)
                    p.scripts.postprocess_image(p, pp)
                    image = pp.image

                if p.color_corrections is not None and i < len(p.color_corrections):
                    if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
                        image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images)
                        images.save_image(image_without_cc, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction")
                    image = apply_color_correction(p.color_corrections[i], image)

                image = apply_overlay(image, p.paste_to, i, p.overlay_images)

                if opts.samples_save and not p.do_not_save_samples:
                    images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p)

                text = infotext(n, i)
                infotexts.append(text)
                if opts.enable_pnginfo:
                    image.info["parameters"] = text
                output_images.append(image)

                if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay:
                    image_mask = p.mask_for_overlay.convert('RGB')
                    image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), p.mask_for_overlay.convert('L')).convert('RGBA')

                    if opts.save_mask:
                        images.save_image(image_mask, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask")

                    if opts.save_mask_composite:
                        images.save_image(image_mask_composite, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask-composite")

                    if opts.return_mask:
                        output_images.append(image_mask)

                    if opts.return_mask_composite:
                        output_images.append(image_mask_composite)

            del x_samples_ddim

            devices.torch_gc()

            state.nextjob()

        p.color_corrections = None

        index_of_first_image = 0
        unwanted_grid_because_of_img_count = len(output_images) < 2 and opts.grid_only_if_multiple
        if (opts.return_grid or opts.grid_save) and not p.do_not_save_grid and not unwanted_grid_because_of_img_count:
            grid = images.image_grid(output_images, p.batch_size)

            if opts.return_grid:
                text = infotext()
                infotexts.insert(0, text)
                if opts.enable_pnginfo:
                    grid.info["parameters"] = text
                output_images.insert(0, grid)
                index_of_first_image = 1

            if opts.grid_save:
                images.save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], opts.grid_format, info=infotext(), short_filename=not opts.grid_extended_filename, p=p, grid=True)

    if not p.disable_extra_networks and extra_network_data:
        extra_networks.deactivate(p, extra_network_data)

    devices.torch_gc()

    res = Processed(p, output_images, p.all_seeds[0], infotext(), comments="".join(["\n\n" + x for x in comments]), subseed=p.all_subseeds[0], index_of_first_image=index_of_first_image, infotexts=infotexts)

    if p.scripts is not None:
        p.scripts.postprocess(p, res)

    return res


def old_hires_fix_first_pass_dimensions(width, height):
    """old algorithm for auto-calculating first pass size"""

    desired_pixel_count = 512 * 512
    actual_pixel_count = width * height
    scale = math.sqrt(desired_pixel_count / actual_pixel_count)
    width = math.ceil(scale * width / 64) * 64
    height = math.ceil(scale * height / 64) * 64

    return width, height
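

# Example: a 1024x768 request has 786432 pixels, so scale = sqrt(262144/786432)
# ~= 0.577, giving a first pass of ceil(591.2/64)*64 x ceil(443.4/64)*64,
# i.e. 640x448.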


class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
    sampler = None

    def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, **kwargs):
        super().__init__(**kwargs)
        self.enable_hr = enable_hr
        self.denoising_strength = denoising_strength
        self.hr_scale = hr_scale
        self.hr_upscaler = hr_upscaler
        self.hr_second_pass_steps = hr_second_pass_steps
        self.hr_resize_x = hr_resize_x
        self.hr_resize_y = hr_resize_y
        self.hr_upscale_to_x = hr_resize_x
        self.hr_upscale_to_y = hr_resize_y

        if firstphase_width != 0 or firstphase_height != 0:
            self.hr_upscale_to_x = self.width
            self.hr_upscale_to_y = self.height
            self.width = firstphase_width
            self.height = firstphase_height

        self.truncate_x = 0
        self.truncate_y = 0
        self.applied_old_hires_behavior_to = None

    def init(self, all_prompts, all_seeds, all_subseeds):
        if self.enable_hr:
            if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height):
                self.hr_resize_x = self.width
                self.hr_resize_y = self.height
                self.hr_upscale_to_x = self.width
                self.hr_upscale_to_y = self.height

                self.width, self.height = old_hires_fix_first_pass_dimensions(self.width, self.height)
                self.applied_old_hires_behavior_to = (self.width, self.height)

            if self.hr_resize_x == 0 and self.hr_resize_y == 0:
                self.extra_generation_params["Hires upscale"] = self.hr_scale
                self.hr_upscale_to_x = int(self.width * self.hr_scale)
                self.hr_upscale_to_y = int(self.height * self.hr_scale)
            else:
                self.extra_generation_params["Hires resize"] = f"{self.hr_resize_x}x{self.hr_resize_y}"

                if self.hr_resize_y == 0:
                    self.hr_upscale_to_x = self.hr_resize_x
                    self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
                elif self.hr_resize_x == 0:
                    self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
                    self.hr_upscale_to_y = self.hr_resize_y
                else:
                    target_w = self.hr_resize_x
                    target_h = self.hr_resize_y
                    src_ratio = self.width / self.height
                    dst_ratio = self.hr_resize_x / self.hr_resize_y

                    if src_ratio < dst_ratio:
                        self.hr_upscale_to_x = self.hr_resize_x
                        self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
                    else:
                        self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
                        self.hr_upscale_to_y = self.hr_resize_y

                    self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f
                    self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f

            # special case: the user has chosen to do nothing
            if self.hr_upscale_to_x == self.width and self.hr_upscale_to_y == self.height:
                self.enable_hr = False
                self.denoising_strength = None
                self.extra_generation_params.pop("Hires upscale", None)
                self.extra_generation_params.pop("Hires resize", None)
                return

            if not state.processing_has_refined_job_count:
                if state.job_count == -1:
                    state.job_count = self.n_iter

                shared.total_tqdm.updateTotal((self.steps + (self.hr_second_pass_steps or self.steps)) * state.job_count)
                state.job_count = state.job_count * 2
                state.processing_has_refined_job_count = True

            if self.hr_second_pass_steps:
                self.extra_generation_params["Hires steps"] = self.hr_second_pass_steps

            if self.hr_upscaler is not None:
                self.extra_generation_params["Hires upscaler"] = self.hr_upscaler
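
    # When both hr_resize_x and hr_resize_y are given, the upscale target is
    # computed to preserve the source aspect ratio, and the overshoot relative
    # to the requested size is later cropped off in latent space; truncate_x/y
    # are therefore stored in latent units (hence the division by opt_f).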

    def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
        self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)

        latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
        if self.enable_hr and latent_scale_mode is None:
            assert len([x for x in shared.sd_upscalers if x.name == self.hr_upscaler]) > 0, f"could not find upscaler named {self.hr_upscaler}"

        x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
        samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))

        if not self.enable_hr:
            return samples

        target_width = self.hr_upscale_to_x
        target_height = self.hr_upscale_to_y

        def save_intermediate(image, index):
            """saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images"""

            if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix:
                return

            if not isinstance(image, Image.Image):
                image = sd_samplers.sample_to_image(image, index, approximation=0)

            info = create_infotext(self, self.all_prompts, self.all_seeds, self.all_subseeds, [], iteration=self.iteration, position_in_batch=index)
            images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, info=info, suffix="-before-highres-fix")

        if latent_scale_mode is not None:
            for i in range(samples.shape[0]):
                save_intermediate(samples, i)

            samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"])

            # Avoid making the inpainting conditioning unless necessary as
            # this does need some extra compute to decode / encode the image again.
            if getattr(self, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) < 1.0:
                image_conditioning = self.img2img_image_conditioning(decode_first_stage(self.sd_model, samples), samples)
            else:
                image_conditioning = self.txt2img_image_conditioning(samples)
        else:
            decoded_samples = decode_first_stage(self.sd_model, samples)
            lowres_samples = torch.clamp((decoded_samples + 1.0) / 2.0, min=0.0, max=1.0)

            batch_images = []
            for i, x_sample in enumerate(lowres_samples):
                x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
                x_sample = x_sample.astype(np.uint8)
                image = Image.fromarray(x_sample)

                save_intermediate(image, i)

                image = images.resize_image(0, image, target_width, target_height, upscaler_name=self.hr_upscaler)
                image = np.array(image).astype(np.float32) / 255.0
                image = np.moveaxis(image, 2, 0)
                batch_images.append(image)

            decoded_samples = torch.from_numpy(np.array(batch_images))
            decoded_samples = decoded_samples.to(shared.device)
            decoded_samples = 2. * decoded_samples - 1.

            samples = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(decoded_samples))

            image_conditioning = self.img2img_image_conditioning(decoded_samples, samples)

        shared.state.nextjob()

        img2img_sampler_name = self.sampler_name

        if self.sampler_name in ['PLMS', 'UniPC']:  # PLMS/UniPC do not support img2img so we just silently switch to DDIM
            img2img_sampler_name = 'DDIM'

        self.sampler = sd_samplers.create_sampler(img2img_sampler_name, self.sd_model)

        samples = samples[:, :, self.truncate_y//2:samples.shape[2]-(self.truncate_y+1)//2, self.truncate_x//2:samples.shape[3]-(self.truncate_x+1)//2]

        noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, p=self)

        # GC now before running the next img2img to prevent running out of memory
        x = None
        devices.torch_gc()

        samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning)

        return samples


class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
    sampler = None

    def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_strength: float = 0.75, image_cfg_scale: float = None, mask: Any = None, mask_blur: int = 4, inpainting_fill: int = 0, inpaint_full_res: bool = True, inpaint_full_res_padding: int = 0, inpainting_mask_invert: int = 0, initial_noise_multiplier: float = None, **kwargs):
        super().__init__(**kwargs)

        self.init_images = init_images
        self.resize_mode: int = resize_mode
        self.denoising_strength: float = denoising_strength
        self.image_cfg_scale: float = image_cfg_scale if shared.sd_model.cond_stage_key == "edit" else None
        self.init_latent = None
        self.image_mask = mask
        self.latent_mask = None
        self.mask_for_overlay = None
        self.mask_blur = mask_blur
        self.inpainting_fill = inpainting_fill
        self.inpaint_full_res = inpaint_full_res
        self.inpaint_full_res_padding = inpaint_full_res_padding
        self.inpainting_mask_invert = inpainting_mask_invert
        self.initial_noise_multiplier = opts.initial_noise_multiplier if initial_noise_multiplier is None else initial_noise_multiplier
        self.mask = None
        self.nmask = None
        self.image_conditioning = None

    def init(self, all_prompts, all_seeds, all_subseeds):
        self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
        crop_region = None

        image_mask = self.image_mask

        if image_mask is not None:
            image_mask = image_mask.convert('L')

            if self.inpainting_mask_invert:
                image_mask = ImageOps.invert(image_mask)

            if self.mask_blur > 0:
                image_mask = image_mask.filter(ImageFilter.GaussianBlur(self.mask_blur))

            if self.inpaint_full_res:
                self.mask_for_overlay = image_mask
                mask = image_mask.convert('L')
                crop_region = masking.get_crop_region(np.array(mask), self.inpaint_full_res_padding)
                crop_region = masking.expand_crop_region(crop_region, self.width, self.height, mask.width, mask.height)
                x1, y1, x2, y2 = crop_region

                mask = mask.crop(crop_region)
                image_mask = images.resize_image(2, mask, self.width, self.height)
                self.paste_to = (x1, y1, x2-x1, y2-y1)
            else:
                image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height)
                np_mask = np.array(image_mask)
                np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8)
                self.mask_for_overlay = Image.fromarray(np_mask)

            self.overlay_images = []

        latent_mask = self.latent_mask if self.latent_mask is not None else image_mask

        add_color_corrections = opts.img2img_color_correction and self.color_corrections is None
        if add_color_corrections:
            self.color_corrections = []
        imgs = []
        for img in self.init_images:
            image = images.flatten(img, opts.img2img_background_color)

            if crop_region is None and self.resize_mode != 3:
                image = images.resize_image(self.resize_mode, image, self.width, self.height)

            if image_mask is not None:
                image_masked = Image.new('RGBa', (image.width, image.height))
                image_masked.paste(image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(self.mask_for_overlay.convert('L')))

                self.overlay_images.append(image_masked.convert('RGBA'))

            # crop_region is not None if we are doing inpaint full res
            if crop_region is not None:
                image = image.crop(crop_region)
                image = images.resize_image(2, image, self.width, self.height)

            if image_mask is not None:
                if self.inpainting_fill != 1:
                    image = masking.fill(image, latent_mask)

            if add_color_corrections:
                self.color_corrections.append(setup_color_correction(image))

            image = np.array(image).astype(np.float32) / 255.0
            image = np.moveaxis(image, 2, 0)

            imgs.append(image)

        if len(imgs) == 1:
            batch_images = np.expand_dims(imgs[0], axis=0).repeat(self.batch_size, axis=0)
            if self.overlay_images is not None:
                self.overlay_images = self.overlay_images * self.batch_size

            if self.color_corrections is not None and len(self.color_corrections) == 1:
                self.color_corrections = self.color_corrections * self.batch_size

        elif len(imgs) <= self.batch_size:
            self.batch_size = len(imgs)
            batch_images = np.array(imgs)
        else:
            raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less")

        image = torch.from_numpy(batch_images)
        image = 2. * image - 1.
        image = image.to(shared.device)

        self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))

        if self.resize_mode == 3:
            self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")

        if image_mask is not None:
            init_mask = latent_mask
            latmask = init_mask.convert('RGB').resize((self.init_latent.shape[3], self.init_latent.shape[2]))
            latmask = np.moveaxis(np.array(latmask, dtype=np.float32), 2, 0) / 255
            latmask = latmask[0]
            latmask = np.around(latmask)
            latmask = np.tile(latmask[None], (4, 1, 1))

            self.mask = torch.asarray(1.0 - latmask).to(shared.device).type(self.sd_model.dtype)
            self.nmask = torch.asarray(latmask).to(shared.device).type(self.sd_model.dtype)

            # this needs to be fixed to be done in sample() using actual seeds for batches
            if self.inpainting_fill == 2:
                self.init_latent = self.init_latent * self.mask + create_random_tensors(self.init_latent.shape[1:], all_seeds[0:self.init_latent.shape[0]]) * self.nmask
            elif self.inpainting_fill == 3:
                self.init_latent = self.init_latent * self.mask

        self.image_conditioning = self.img2img_image_conditioning(image, self.init_latent, image_mask)
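
    # inpainting_fill selects how masked areas are initialized; judging by the
    # code above and the img2img UI: 0 = fill with surrounding colors
    # (masking.fill), 1 = keep original content, 2 = latent noise, 3 = latent
    # nothing (zeroed latent).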

    def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
        x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)

        if self.initial_noise_multiplier != 1.0:
            self.extra_generation_params["Noise multiplier"] = self.initial_noise_multiplier
            x *= self.initial_noise_multiplier

        samples = self.sampler.sample_img2img(self, self.init_latent, x, conditioning, unconditional_conditioning, image_conditioning=self.image_conditioning)

        if self.mask is not None:
            samples = samples * self.nmask + self.init_latent * self.mask

        del x
        devices.torch_gc()

        return samples
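

# A minimal end-to-end sketch (assumes a fully initialized webui runtime with a
# model loaded; parameter values are illustrative):
#
#     p = StableDiffusionProcessingTxt2Img(
#         sd_model=shared.sd_model,
#         outpath_samples="outputs/txt2img-images",
#         prompt="a photo of a cat",
#         steps=20,
#         width=512,
#         height=512,
#     )
#     processed = process_images(p)
#     processed.images[0].save("out.png")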