__init__.py

import cv2
import numpy as np
import torch
import os

from einops import rearrange
from .models.mbv2_mlsd_tiny import MobileV2_MLSD_Tiny
from .models.mbv2_mlsd_large import MobileV2_MLSD_Large
from .utils import pred_lines

from modules import devices
from annotator.annotator_path import models_path

# Lazily-loaded M-LSD model and the locations the checkpoint can come from.
mlsdmodel = None
remote_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/mlsd_large_512_fp32.pth"
old_modeldir = os.path.dirname(os.path.realpath(__file__))
modeldir = os.path.join(models_path, "mlsd")


def unload_mlsd_model():
    """Move the cached model back to the CPU to free GPU memory."""
    global mlsdmodel
    if mlsdmodel is not None:
        mlsdmodel = mlsdmodel.cpu()


def apply_mlsd(input_image, thr_v, thr_d):
    """Detect line segments with M-LSD and return a single-channel line map.

    thr_v is the score threshold and thr_d the distance threshold passed to
    pred_lines.
    """
    global modelpath, mlsdmodel
    if mlsdmodel is None:
        # Prefer a checkpoint placed next to this file (legacy location);
        # otherwise use the shared models directory, downloading if missing.
        modelpath = os.path.join(modeldir, "mlsd_large_512_fp32.pth")
        old_modelpath = os.path.join(old_modeldir, "mlsd_large_512_fp32.pth")
        if os.path.exists(old_modelpath):
            modelpath = old_modelpath
        elif not os.path.exists(modelpath):
            from basicsr.utils.download_util import load_file_from_url
            load_file_from_url(remote_model_path, model_dir=modeldir)
        mlsdmodel = MobileV2_MLSD_Large()
        mlsdmodel.load_state_dict(torch.load(modelpath), strict=True)
        mlsdmodel = mlsdmodel.to(devices.get_device_for("controlnet")).eval()

    model = mlsdmodel
    assert input_image.ndim == 3
    img = input_image
    img_output = np.zeros_like(img)
    try:
        with torch.no_grad():
            lines = pred_lines(img, model, [img.shape[0], img.shape[1]], thr_v, thr_d)
            # Rasterize each detected segment as a 1-pixel white line.
            for line in lines:
                x_start, y_start, x_end, y_end = [int(val) for val in line]
                cv2.line(img_output, (x_start, y_start), (x_end, y_end), [255, 255, 255], 1)
    except Exception as e:
        pass

    return img_output[:, :, 0]
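
For reference, a minimal usage sketch of apply_mlsd (not part of the file above). It assumes the webui environment where modules.devices and annotator.annotator_path are importable, an import path of annotator.mlsd for this package, a hypothetical "input.png" on disk loaded as an H x W x 3 uint8 image, and illustrative threshold values rather than ones defined in this file.

import cv2
from annotator.mlsd import apply_mlsd  # assumed package path inside the extension

img = cv2.imread("input.png")                       # hypothetical input image
line_map = apply_mlsd(img, thr_v=0.1, thr_d=0.1)    # illustrative thresholds
cv2.imwrite("mlsd_lines.png", line_map)             # single-channel white-on-black map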