# default.toml — LoRA training configuration (kohya-ss sd-scripts style)
  1. [model]
  2. v2 = false
  3. v_parameterization = false
  4. pretrained_model_name_or_path = "./sd-models/model.ckpt"
  5. [dataset]
  6. train_data_dir = "./train/input"
  7. reg_data_dir = ""
  8. prior_loss_weight = 1
  9. cache_latents = true
  10. shuffle_caption = true
  11. enable_bucket = true
  12. [additional_network]
  13. network_dim = 32
  14. network_alpha = 16
  15. network_train_unet_only = false
  16. network_train_text_encoder_only = false
  17. network_module = "networks.lora"
  18. network_args = []
  19. [optimizer]
  20. unet_lr = 1e-4
  21. text_encoder_lr = 1e-5
  22. optimizer_type = "AdamW8bit"
  23. lr_scheduler = "cosine_with_restarts"
  24. lr_warmup_steps = 0
  25. lr_restart_cycles = 1
  26. [training]
  27. resolution = "512,512"
  28. batch_size = 1
  29. max_train_epochs = 10
  30. noise_offset = 0.0
  31. keep_tokens = 0
  32. xformers = true
  33. lowram = false
  34. clip_skip = 2
  35. mixed_precision = "fp16"
  36. save_precision = "fp16"
  37. [sample_prompt]
  38. sample_sampler = "euler_a"
  39. sample_every_n_epochs = 1
  40. [saving]
  41. output_name = "output_name"
  42. save_every_n_epochs = 1
  43. save_n_epoch_ratio = 0
  44. save_last_n_epochs = 499
  45. save_state = false
  46. save_model_as = "safetensors"
  47. output_dir = "./output"
  48. logging_dir = "./logs"
  49. log_prefix = "output_name"
  50. [others]
  51. min_bucket_reso = 256
  52. max_bucket_reso = 1024
  53. caption_extension = ".txt"
  54. max_token_length = 225
  55. seed = 1337