diff --git a/backends/stable_diffusion/diffusionbee_backend.py b/backends/stable_diffusion/diffusionbee_backend.py
index d1d28ed5..ac2a3e30 100644
--- a/backends/stable_diffusion/diffusionbee_backend.py
+++ b/backends/stable_diffusion/diffusionbee_backend.py
@@ -93,7 +93,7 @@ def __getattr__(self, attr):
 
 
 def download_weights():
-    global p_14 , p_14_np
+    global p_15 , p_15_np
 
     print("sdbk mltl Loading Model")
 
@@ -101,14 +101,14 @@ def download_weights():
 
     for _ in range(10):
         try:
-            p_14 = ProgressBarDownloader(title="Downloading Model 1/2").download(
-                url="https://huggingface.co/divamgupta/stable_diffusion_mps/resolve/main/sd-v1-4_fp16.tdict",
-                md5_checksum="9f1fc1e94821d000b811e3bb6e7686b2",
+            p_15 = ProgressBarDownloader(title="Downloading Model 1/2").download(
+                url="https://huggingface.co/divamgupta/stable_diffusion_mps/resolve/main/sd-v1-5_fp16.tdict",
+                md5_checksum="a36c79b8edb4b21b75e50d5834d1f4ae",
                 verify_ssl=False,
                 extract_zip=False,
             )
 
-            p_14_np = ProgressBarDownloader(title="Downloading Model 2/2").download(
+            p_15_np = ProgressBarDownloader(title="Downloading Model 2/2").download(
                 url="https://huggingface.co/divamgupta/stable_diffusion_mps/resolve/main/sd-v1-5-inpainting_fp16.tdict",
                 md5_checksum="68303f49cca00968c39abddc20b622a6",
                 verify_ssl=False,
@@ -139,10 +139,10 @@ def process_opt(d, generator):
 
     if d['model_id'] == 1:
         model_mode = "inpaint_15"
-        tdict_path = p_14_np
+        tdict_path = p_15_np
         print("sdbk mdvr 1.5_inp")
     else:
-        tdict_path = p_14
+        tdict_path = p_15
         print("sdbk mdvr 1.4")
 
     if d['do_controlnet'] == True:
@@ -262,7 +262,7 @@ def process_opt(d, generator):
 
 
 def diffusion_bee_main():
-    global p_14 , p_14_np
+    global p_15 , p_15_np
     download_weights()
 
     print("sdbk mltl Loading Model")
@@ -277,7 +277,7 @@ def callback(state="" , progress=-1):
         if "__stop__" in get_input():
             return "stop"
 
-    generator = StableDiffusion( ModelInterface , p_14 , model_name="sd_1x", callback=callback, debug_output_path=debug_output_path )
+    generator = StableDiffusion( ModelInterface , p_15 , model_name="sd_1x", callback=callback, debug_output_path=debug_output_path )
 
     default_d = { "W" : 512 , "H" : 512, "num_imgs":1 ,
             "ddim_steps" : 25 ,
diff --git a/backends/stable_diffusion/stable_diffusion.py b/backends/stable_diffusion/stable_diffusion.py
index 1a3e77af..e2618863 100644
--- a/backends/stable_diffusion/stable_diffusion.py
+++ b/backends/stable_diffusion/stable_diffusion.py
@@ -235,7 +235,7 @@ def prepare_model_interface(self , sd_run=None ):
 
         if self.current_model_name != model_name or self.current_dtype != dtype :
 
-            print("Creating model interface")
+            print("[SD] Creating model interface")
             assert tdict_path is not None
 
             if self.model is not None:
@@ -279,7 +279,7 @@ def prepare_model_interface(self , sd_run=None ):
 
             if tdict_1 is None:
                 tdict_1 = TDict(tdict_path)
-            print("Loading LoRA weights")
+            print("[SD] Loading LoRA weights")
             extra_weights = add_lora_ti_weights(tdict_1 , weight_additions )
             self.model.load_from_state_dict(extra_weights )
             self.current_weight_additions = weight_additions
@@ -292,7 +292,7 @@ def tokenize(self , prompt):
         inputs = self.tokenizer.encode(prompt)
 
         if len(inputs) >= 77:
-            print("Prompt is too long, stripping it ")
+            print("[SD] Prompt is too long, stripping it ")
             inputs = inputs[:77]
         if self.current_model_name == "sd_2x":
             phrase = inputs + [0] * (77 - len(inputs))
@@ -461,7 +461,6 @@ def prepare_init_latent(self , sd_run):
 
     def t_to_i(self, t):
         i = list(self.scheduler.timesteps).index(t)
-        print("t 2 i " , i )
         assert i >= 0
         return i
 
diff --git a/backends/stable_diffusion/tests.py b/backends/stable_diffusion/tests.py
index 33289ca9..7bd24d42 100644
--- a/backends/stable_diffusion/tests.py
+++ b/backends/stable_diffusion/tests.py
@@ -434,8 +434,27 @@ def test_lr_32():
 
 
 # test_lr_1()
 
+def test_11():
 
-test_sd2_4()
+    img = sd.generate(
+        prompt="a tree" ,
+        img_height=512,
+        img_width=512,
+        seed=1,
+        tdict_path=None,
+        batch_size=1,
+        num_steps=5,
+        dtype=ModelInterface.default_float_type,
+        scheduler='ddim',
+        mode="txt2img" )
+
+    gt_p = "./test_assets/outputs/a_tree_1_ddim.png"
+
+    Image.fromarray(img['img'][0]).show()
+
+
+
+test_11()
 
 exit()
diff --git a/electron_app/.gitignore b/electron_app/.gitignore
index c1489421..e0c4a0a5 100644
--- a/electron_app/.gitignore
+++ b/electron_app/.gitignore
@@ -23,4 +23,6 @@ pnpm-debug.log*
 *.sw?
 
 #Electron-builder output
-/dist_electron
\ No newline at end of file
+/dist_electron
+
+build_config.json
\ No newline at end of file
diff --git a/electron_app/afterSignHook.js b/electron_app/afterSignHook.js
index f0139590..4fa5a815 100644
--- a/electron_app/afterSignHook.js
+++ b/electron_app/afterSignHook.js
@@ -10,7 +10,7 @@ module.exports = async function (params) {
 
   console.log('afterSign hook triggered', params);
 
-  let appId = 'com.linerai.diffusionbee'
+  let appId = 'com.diffusionbee.diffusionbee'
 
   let appPath = path.join(params.appOutDir, `${params.packager.appInfo.productFilename}.app`);
   if (!fs.existsSync(appPath)) {
diff --git a/electron_app/package.json b/electron_app/package.json
index 505df03f..f8813847 100644
--- a/electron_app/package.json
+++ b/electron_app/package.json
@@ -1,7 +1,7 @@
 {
   "name": "DiffusionBee",
-  "version": "1.7.3",
-  "build_number": "0022",
+  "version": "1.7.4",
+  "build_number": "0023",
   "website": "https://diffusionbee.com",
   "description": "Diffusion Bee - Stable Diffusion App.",
   "is_dev": false,
diff --git a/electron_app/src/components/ControlNet.vue b/electron_app/src/components/ControlNet.vue
index 5190e459..dac13b28 100644
--- a/electron_app/src/components/ControlNet.vue
+++ b/electron_app/src/components/ControlNet.vue
@@ -8,7 +8,7 @@
 -->
 Supported models : SD1.x , SD2.x - .ckpt and .safetensors
+Note: If you import an SD2.x 768 model, you might need to use the ddim_v sampler to get better results.
+