诸葛温侯 1 year ago
parent
commit
a444d17e59

+ 1 - 1
README.md

@@ -2,7 +2,7 @@
 
 The WebUI extension for ControlNet and other injection-based SD controls.
 
-![image](https://github.com/Mikubill/sd-webui-controlnet/assets/20929282/90d9b877-ffbc-454f-a020-09116125f8ed)
+![image](https://github.com/Mikubill/sd-webui-controlnet/assets/19834515/00787fd1-1bc5-4b90-9a23-9683f8458b85)
 
 This extension is for AUTOMATIC1111's [Stable Diffusion web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui); it allows the Web UI to add [ControlNet](https://github.com/lllyasviel/ControlNet) to the original Stable Diffusion model when generating images. The addition is on-the-fly; no model merging is required.
 

+ 37 - 9
scripts/controlnet_ui/controlnet_ui_group.py

@@ -179,9 +179,10 @@ class ControlNetUiGroup(object):
                         visible=False, elem_classes=["cnet-generated-image-group"]
                     ) as self.generated_image_group:
                         self.generated_image = gr.Image(
+                            value=None,
                             label="Preprocessor Preview",
                             elem_id=f"{elem_id_tabname}_{tabname}_generated_image",
-                            elem_classes=["cnet-image"],
+                            elem_classes=["cnet-image"], interactive=False
                         ).style(
                             height=242
                         )  # Gradio's magic number. Only 242 works.
@@ -254,10 +255,7 @@ class ControlNetUiGroup(object):
                 elem_id=f"{elem_id_tabname}_{tabname}_controlnet_send_dimen_button",
             )
 
-        with FormRow(
-            elem_classes=["checkboxes-row", "controlnet_main_options"],
-            variant="compact",
-        ):
+        with FormRow(elem_classes=["controlnet_main_options"]):
             self.enabled = gr.Checkbox(
                 label="Enable",
                 value=self.default_unit.enabled,
@@ -285,7 +283,7 @@ class ControlNetUiGroup(object):
             )
 
         if not shared.opts.data.get("controlnet_disable_control_type", False):
-            with gr.Row(elem_classes="controlnet_control_type"):
+            with gr.Row(elem_classes=["controlnet_control_type", "controlnet_row"]):
                 self.type_filter = gr.Radio(
                     list(preprocessor_filters.keys()),
                     label=f"Control Type",
@@ -294,7 +292,7 @@ class ControlNetUiGroup(object):
                     elem_classes="controlnet_control_type_filter_group",
                 )
 
-        with gr.Row(elem_classes="controlnet_preprocessor_model"):
+        with gr.Row(elem_classes=["controlnet_preprocessor_model", "controlnet_row"]):
             self.module = gr.Dropdown(
                 global_state.ui_preprocessor_keys,
                 label=f"Preprocessor",
@@ -318,7 +316,7 @@ class ControlNetUiGroup(object):
                 elem_id=f"{elem_id_tabname}_{tabname}_controlnet_refresh_models",
             )
 
-        with gr.Row(elem_classes="controlnet_weight_steps"):
+        with gr.Row(elem_classes=["controlnet_weight_steps", "controlnet_row"]):
             self.weight = gr.Slider(
                 label=f"Control Weight",
                 value=self.default_unit.weight,
@@ -672,7 +670,7 @@ class ControlNetUiGroup(object):
                 # generated_image_group
                 gr.update(visible=is_on),
                 # use_preview_as_input,
-                gr.update(visible=is_on),
+                gr.update(visible=False),  # Now this is automatically managed
                 # download_pose_link
                 gr.update() if is_on else gr.update(value=None),
                 # modal edit button
@@ -824,6 +822,36 @@ class ControlNetUiGroup(object):
                     fn=UiControlNetUnit, inputs=list(unit_args), outputs=unit
                 )
 
+        def clear_preview(x):
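+            # Reset the "use preview as input" checkbox and drop the current preview image.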
+            if x:
+                logger.info('Preview as input is cancelled.')
+            return gr.update(value=False), gr.update(value=None)
+
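+        # Changing any of these preprocessor-related inputs invalidates the preview,
+        # so hook clear_preview onto the most specific event each component exposes.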
+        for comp in (
+            self.pixel_perfect,
+            self.module,
+            self.input_image,
+            self.processor_res,
+            self.threshold_a,
+            self.threshold_b,
+        ):
+            event_subscribers = []
+            if hasattr(comp, "edit"):
+                event_subscribers.append(comp.edit)
+            elif hasattr(comp, "click"):
+                event_subscribers.append(comp.click)
+            elif isinstance(comp, gr.Slider) and hasattr(comp, "release"):
+                event_subscribers.append(comp.release)
+            elif hasattr(comp, "change"):
+                event_subscribers.append(comp.change)
+            if hasattr(comp, "clear"):
+                event_subscribers.append(comp.clear)
+            for event_subscriber in event_subscribers:
+                event_subscriber(
+                    fn=clear_preview,
+                    inputs=self.use_preview_as_input,
+                    outputs=[self.use_preview_as_input, self.generated_image],
+                )
+
         # keep input_mode in sync
         def ui_controlnet_unit_for_input_mode(input_mode, *args):
             args = list(args)

+ 2 - 0
scripts/controlnet_ui/openpose_editor.py

@@ -6,6 +6,7 @@ from typing import List, Dict, Any, Tuple
 from annotator.openpose import decode_json_as_poses, draw_poses
 from scripts.controlnet_ui.modal import ModalInterface
 from modules import shared
+from scripts.logging import logger
 
 
 def parse_data_url(data_url: str):
@@ -63,6 +64,7 @@ class OpenposeEditor(object):
         def render_pose(pose_url: str) -> Tuple[Dict, Dict]:
             json_string = parse_data_url(pose_url)
             poses, height, weight = decode_json_as_poses(json_string)
+            logger.info('Preview as input is enabled.')
             return (
                 # Generated image.
                 gr.update(

+ 1 - 1
scripts/controlnet_version.py

@@ -1,4 +1,4 @@
-version_flag = 'v1.1.238'
+version_flag = 'v1.1.306'
 
 from scripts.logging import logger
 

+ 5 - 0
scripts/hook.py

@@ -18,6 +18,7 @@ from ldm.modules.attention import BasicTransformerBlock
 from ldm.models.diffusion.ddpm import extract_into_tensor
 
 from modules.prompt_parser import MulticondLearnedConditioning, ComposableScheduledPromptConditioning, ScheduledPromptConditioning
+from modules.processing import StableDiffusionProcessing
 
 
 POSITIVE_MARK_TOKEN = 1024
@@ -83,6 +84,10 @@ def unmark_prompt_context(x):
     mark_batch = mark[:, None, None, None].to(x.dtype).to(x.device)
     uc_indices = mark.detach().cpu().numpy().tolist()
     uc_indices = [i for i, item in enumerate(uc_indices) if item < 0.5]
+
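+    # Reset A1111's class-level prompt conditioning caches (cached_c / cached_uc).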
+    StableDiffusionProcessing.cached_c = [None, None]
+    StableDiffusionProcessing.cached_uc = [None, None]
+
     return mark_batch, uc_indices, context
 
 

+ 5 - 1
style.css

@@ -132,4 +132,8 @@
 
 .cnet-disabled-radio {
     opacity: 50%;
-}
+}
+
+.controlnet_row {
+    margin-top: 10px !important;
+}