From e711ae6d7d606ea7f22523492f76249c1438a0a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Fri, 9 Jan 2026 22:01:29 +0100 Subject: [PATCH 01/23] Select backend devices via arg --- examples/common/common.hpp | 63 ++++++---- include/stable-diffusion.h | 12 +- src/stable-diffusion.cpp | 235 +++++++++++++++++++++---------------- 3 files changed, 182 insertions(+), 128 deletions(-) diff --git a/examples/common/common.hpp b/examples/common/common.hpp index 9389b03a3..c3df7467d 100644 --- a/examples/common/common.hpp +++ b/examples/common/common.hpp @@ -447,6 +447,13 @@ struct SDContextParams { std::string tensor_type_rules; std::string lora_model_dir = "."; + std::string main_backend_device; + std::string diffusion_backend_device; + std::string clip_backend_device; + std::string vae_backend_device; + std::string tae_backend_device; + std::string control_net_backend_device; + std::map embedding_map; std::vector embedding_vec; @@ -454,9 +461,6 @@ struct SDContextParams { rng_type_t sampler_rng_type = RNG_TYPE_COUNT; bool offload_params_to_cpu = false; bool enable_mmap = false; - bool control_net_cpu = false; - bool clip_on_cpu = false; - bool vae_on_cpu = false; bool flash_attn = false; bool diffusion_flash_attn = false; bool diffusion_conv_direct = false; @@ -562,6 +566,31 @@ struct SDContextParams { "--upscale-model", "path to esrgan model.", &esrgan_path}, + {"", + "--main-backend-device", + "default device to use for all backends (defaults to main gpu device if hardware acceleration is available, otherwise cpu)", + &main_backend_device}, + {"", + "--diffusion-backend-device", + "device to use for diffusion (defaults to main-backend-device)", + &diffusion_backend_device}, + {"", + "--clip-backend-device", + "device to use for clip (defaults to main-backend-device)", + &clip_backend_device}, + {"", + "--vae-backend-device", + "device to use for vae (defaults to main-backend-device). Also applies to tae, unless tae-backend-device is specified", + &vae_backend_device}, + {"", + "--tae-backend-device", + "device to use for tae (defaults to vae-backend-device)", + &tae_backend_device}, + {"", + "--control-net-backend-device", + "device to use for control net (defaults to main-backend-device)", + &control_net_backend_device}, + }; options.int_options = { @@ -600,18 +629,6 @@ struct SDContextParams { "--mmap", "whether to memory-map model", true, &enable_mmap}, - {"", - "--control-net-cpu", - "keep controlnet in cpu (for low vram)", - true, &control_net_cpu}, - {"", - "--clip-on-cpu", - "keep clip in cpu (for low vram)", - true, &clip_on_cpu}, - {"", - "--vae-on-cpu", - "keep vae in cpu (for low vram)", - true, &vae_on_cpu}, {"", "--fa", "use flash attention", @@ -876,6 +893,7 @@ struct SDContextParams { std::string embeddings_str = emb_ss.str(); std::ostringstream oss; + // TODO backend devices oss << "SDContextParams {\n" << " n_threads: " << n_threads << ",\n" << " model_path: \"" << model_path << "\",\n" @@ -901,9 +919,9 @@ struct SDContextParams { << " sampler_rng_type: " << sd_rng_type_name(sampler_rng_type) << ",\n" << " offload_params_to_cpu: " << (offload_params_to_cpu ? "true" : "false") << ",\n" << " enable_mmap: " << (enable_mmap ? "true" : "false") << ",\n" - << " control_net_cpu: " << (control_net_cpu ? "true" : "false") << ",\n" - << " clip_on_cpu: " << (clip_on_cpu ? "true" : "false") << ",\n" - << " vae_on_cpu: " << (vae_on_cpu ? "true" : "false") << ",\n" + // << " control_net_cpu: " << (control_net_cpu ? 
"true" : "false") << ",\n" + // << " clip_on_cpu: " << (clip_on_cpu ? "true" : "false") << ",\n" + // << " vae_on_cpu: " << (vae_on_cpu ? "true" : "false") << ",\n" << " flash_attn: " << (flash_attn ? "true" : "false") << ",\n" << " diffusion_flash_attn: " << (diffusion_flash_attn ? "true" : "false") << ",\n" << " diffusion_conv_direct: " << (diffusion_conv_direct ? "true" : "false") << ",\n" @@ -966,9 +984,6 @@ struct SDContextParams { lora_apply_mode, offload_params_to_cpu, enable_mmap, - clip_on_cpu, - control_net_cpu, - vae_on_cpu, flash_attn, diffusion_flash_attn, taesd_preview, @@ -981,6 +996,12 @@ struct SDContextParams { chroma_use_t5_mask, chroma_t5_mask_pad, qwen_image_zero_cond_t, + main_backend_device.c_str(), + diffusion_backend_device.c_str(), + clip_backend_device.c_str(), + vae_backend_device.c_str(), + tae_backend_device.c_str(), + control_net_backend_device.c_str(), }; return sd_ctx_params; } diff --git a/include/stable-diffusion.h b/include/stable-diffusion.h index 029c2ab1d..4a8b645ae 100644 --- a/include/stable-diffusion.h +++ b/include/stable-diffusion.h @@ -186,9 +186,9 @@ typedef struct { enum lora_apply_mode_t lora_apply_mode; bool offload_params_to_cpu; bool enable_mmap; - bool keep_clip_on_cpu; - bool keep_control_net_on_cpu; - bool keep_vae_on_cpu; + // bool keep_clip_on_cpu; + // bool keep_control_net_on_cpu; + // bool keep_vae_on_cpu; bool flash_attn; bool diffusion_flash_attn; bool tae_preview_only; @@ -201,6 +201,12 @@ typedef struct { bool chroma_use_t5_mask; int chroma_t5_mask_pad; bool qwen_image_zero_cond_t; + const char* main_device; + const char* diffusion_device; + const char* clip_device; + const char* vae_device; + const char* tae_device; + const char* control_net_device; } sd_ctx_params_t; typedef struct { diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index a59ff23e8..8202e43aa 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -105,9 +105,13 @@ static float get_cache_reuse_threshold(const sd_cache_params_t& params) { class StableDiffusionGGML { public: ggml_backend_t backend = nullptr; // general backend + ggml_backend_t diffusion_backend = nullptr; ggml_backend_t clip_backend = nullptr; ggml_backend_t control_net_backend = nullptr; ggml_backend_t vae_backend = nullptr; + ggml_backend_t tae_backend = nullptr; + + // TODO: clip_vision and photomaker backends SDVersion version; bool vae_decode_only = false; @@ -155,72 +159,65 @@ class StableDiffusionGGML { StableDiffusionGGML() = default; ~StableDiffusionGGML() { + if (diffusion_backend != backend) { + ggml_backend_free(diffusion_backend); + } if (clip_backend != backend) { ggml_backend_free(clip_backend); } if (control_net_backend != backend) { ggml_backend_free(control_net_backend); } + if (tae_backend != vae_backend) { + ggml_backend_free(tae_backend); + } if (vae_backend != backend) { ggml_backend_free(vae_backend); } ggml_backend_free(backend); } - void init_backend() { -#ifdef SD_USE_CUDA - LOG_DEBUG("Using CUDA backend"); - backend = ggml_backend_cuda_init(0); -#endif -#ifdef SD_USE_METAL - LOG_DEBUG("Using Metal backend"); - backend = ggml_backend_metal_init(); -#endif -#ifdef SD_USE_VULKAN - LOG_DEBUG("Using Vulkan backend"); - size_t device = 0; - const int device_count = ggml_backend_vk_get_device_count(); - if (device_count) { - const char* SD_VK_DEVICE = getenv("SD_VK_DEVICE"); - if (SD_VK_DEVICE != nullptr) { - std::string sd_vk_device_str = SD_VK_DEVICE; - try { - device = std::stoull(sd_vk_device_str); - } catch (const std::invalid_argument&) { 
- LOG_WARN("SD_VK_DEVICE environment variable is not a valid integer (%s). Falling back to device 0.", SD_VK_DEVICE); - device = 0; - } catch (const std::out_of_range&) { - LOG_WARN("SD_VK_DEVICE environment variable value is out of range for `unsigned long long` type (%s). Falling back to device 0.", SD_VK_DEVICE); - device = 0; - } - if (device >= device_count) { - LOG_WARN("Cannot find targeted vulkan device (%llu). Falling back to device 0.", device); - device = 0; - } - } - LOG_INFO("Vulkan: Using device %llu", device); - backend = ggml_backend_vk_init(device); + void list_backends() { + // TODO: expose via C API and fill a cstr + const int device_count = ggml_backend_dev_count(); + for (int i = 0; i < device_count; i++) { + LOG_INFO("%s", ggml_backend_dev_name(ggml_backend_dev_get(i))); } - if (!backend) { - LOG_WARN("Failed to initialize Vulkan backend"); + } + + bool backend_name_exists(std::string name) { + const int device_count = ggml_backend_dev_count(); + for (int i = 0; i < device_count; i++) { + if (name == ggml_backend_dev_name(ggml_backend_dev_get(i))) { + return true; + } } -#endif -#ifdef SD_USE_OPENCL - LOG_DEBUG("Using OpenCL backend"); - // ggml_log_set(ggml_log_callback_default, nullptr); // Optional ggml logs - backend = ggml_backend_opencl_init(); - if (!backend) { - LOG_WARN("Failed to initialize OpenCL backend"); + return false; + } + + std::string sanitize_backend_name(std::string name) { + if (name == "" || backend_name_exists(name)) { + return name; + } else { + LOG_WARN("Backend %s not found, using default backend", name.c_str()); + return ""; } -#endif -#ifdef SD_USE_SYCL - LOG_DEBUG("Using SYCL backend"); - backend = ggml_backend_sycl_init(0); -#endif + } - if (!backend) { - LOG_DEBUG("Using CPU backend"); - backend = ggml_backend_cpu_init(); + std::string get_default_backend_name() { + // should pick the same backend as ggml_backend_init_best + ggml_backend_dev_t dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_GPU); + dev = dev ? dev : ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_IGPU); + dev = dev ? 
dev : ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); + return ggml_backend_dev_name(dev); + } + + ggml_backend_t init_named_backend(std::string name = "") { + LOG_DEBUG("Initializing backend: %s", name.c_str()); + if (name.empty()) { + return ggml_backend_init_best(); + } else { + return ggml_backend_init_by_name(name.c_str(), nullptr); } } @@ -251,7 +248,44 @@ class StableDiffusionGGML { ggml_log_set(ggml_log_callback_default, nullptr); - init_backend(); + list_backends(); + + std::string default_backend_name = get_default_backend_name(); + + std::string override_default_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->main_device)); + + if (override_default_backend_name.size() > 0) { + LOG_INFO("Setting default backend to %s", override_default_backend_name.c_str()); + default_backend_name = override_default_backend_name; + } + + std::string diffusion_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->diffusion_device)); + std::string clip_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->clip_device)); + std::string control_net_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->control_net_device)); + std::string vae_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->vae_device)); + std::string tae_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->tae_device)); + + bool diffusion_backend_is_default = diffusion_backend_name.empty() || diffusion_backend_name == default_backend_name; + bool clip_backend_is_default = (clip_backend_name.empty() || clip_backend_name == default_backend_name); + bool control_net_backend_is_default = (control_net_backend_name.empty() || control_net_backend_name == default_backend_name); + bool vae_backend_is_default = (vae_backend_name.empty() || vae_backend_name == default_backend_name); + // if tae_backend_name is empty, it will use the same backend as vae + bool tae_backend_is_default = (tae_backend_name.empty() && vae_backend_is_default) || tae_backend_name == default_backend_name; + + // if some backend is not specified or is the same as the default backend, use the default backend + bool use_default_backend = diffusion_backend_is_default || clip_backend_is_default || control_net_backend_is_default || vae_backend_is_default || tae_backend_is_default; + + if (use_default_backend) { + backend = init_named_backend(override_default_backend_name); + LOG_DEBUG("Loaded default backend %s", ggml_backend_name(backend)); + } + + if (!diffusion_backend_is_default) { + diffusion_backend = init_named_backend(diffusion_backend_name); + LOG_INFO("Using diffusion backend: %s", ggml_backend_name(diffusion_backend)); + } else { + diffusion_backend = backend; + } ModelLoader model_loader; @@ -422,21 +456,19 @@ class StableDiffusionGGML { LOG_INFO("Using circular padding for convolutions"); } - bool clip_on_cpu = sd_ctx_params->keep_clip_on_cpu; - { clip_backend = backend; - if (clip_on_cpu && !ggml_backend_is_cpu(backend)) { - LOG_INFO("CLIP: Using CPU backend"); - clip_backend = ggml_backend_cpu_init(); + if (!clip_backend_is_default) { + clip_backend = init_named_backend(clip_backend_name); + LOG_INFO("CLIP: Using %s backend", ggml_backend_name(clip_backend)); } if (sd_version_is_sd3(version)) { cond_stage_model = std::make_shared(clip_backend, offload_params_to_cpu, tensor_storage_map); - diffusion_model = std::make_shared(backend, - offload_params_to_cpu, - tensor_storage_map); + diffusion_model = std::make_shared(diffusion_backend, + offload_params_to_cpu, + tensor_storage_map); } else if 
(sd_version_is_flux(version)) { bool is_chroma = false; for (auto pair : tensor_storage_map) { @@ -472,7 +504,7 @@ class StableDiffusionGGML { offload_params_to_cpu, tensor_storage_map); } - diffusion_model = std::make_shared(backend, + diffusion_model = std::make_shared(diffusion_backend, offload_params_to_cpu, tensor_storage_map, version, @@ -483,11 +515,11 @@ class StableDiffusionGGML { offload_params_to_cpu, tensor_storage_map, version); - diffusion_model = std::make_shared(backend, - offload_params_to_cpu, - tensor_storage_map, - version, - sd_ctx_params->chroma_use_dit_mask); + diffusion_model = std::make_shared(diffusion_backend, + offload_params_to_cpu, + tensor_storage_map, + version, + sd_ctx_params->chroma_use_dit_mask); } else if (sd_version_is_wan(version)) { cond_stage_model = std::make_shared(clip_backend, offload_params_to_cpu, @@ -495,13 +527,13 @@ class StableDiffusionGGML { true, 1, true); - diffusion_model = std::make_shared(backend, - offload_params_to_cpu, - tensor_storage_map, - "model.diffusion_model", - version); + diffusion_model = std::make_shared(diffusion_backend, + offload_params_to_cpu, + tensor_storage_map, + "model.diffusion_model", + version); if (strlen(SAFE_STR(sd_ctx_params->high_noise_diffusion_model_path)) > 0) { - high_noise_diffusion_model = std::make_shared(backend, + high_noise_diffusion_model = std::make_shared(diffusion_backend, offload_params_to_cpu, tensor_storage_map, "model.high_noise_diffusion_model", @@ -527,12 +559,12 @@ class StableDiffusionGGML { version, "", enable_vision); - diffusion_model = std::make_shared(backend, - offload_params_to_cpu, - tensor_storage_map, - "model.diffusion_model", - version, - sd_ctx_params->qwen_image_zero_cond_t); + diffusion_model = std::make_shared(diffusion_backend, + offload_params_to_cpu, + tensor_storage_map, + "model.diffusion_model", + version, + sd_ctx_params->qwen_image_zero_cond_t); } else if (sd_version_is_anima(version)) { cond_stage_model = std::make_shared(clip_backend, offload_params_to_cpu, @@ -546,11 +578,11 @@ class StableDiffusionGGML { offload_params_to_cpu, tensor_storage_map, version); - diffusion_model = std::make_shared(backend, - offload_params_to_cpu, - tensor_storage_map, - "model.diffusion_model", - version); + diffusion_model = std::make_shared(diffusion_backend, + offload_params_to_cpu, + tensor_storage_map, + "model.diffusion_model", + version); } else { // SD1.x SD2.x SDXL std::map embbeding_map; for (uint32_t i = 0; i < sd_ctx_params->embedding_count; i++) { @@ -570,7 +602,7 @@ class StableDiffusionGGML { embbeding_map, version); } - diffusion_model = std::make_shared(backend, + diffusion_model = std::make_shared(diffusion_backend, offload_params_to_cpu, tensor_storage_map, version); @@ -595,18 +627,22 @@ class StableDiffusionGGML { high_noise_diffusion_model->get_param_tensors(tensors); } - if (sd_ctx_params->keep_vae_on_cpu && !ggml_backend_is_cpu(backend)) { - LOG_INFO("VAE Autoencoder: Using CPU backend"); - vae_backend = ggml_backend_cpu_init(); - } else { - vae_backend = backend; + vae_backend = backend; + if (!vae_backend_is_default) { + vae_backend = init_named_backend(vae_backend_name); + LOG_INFO("VAE Autoencoder: Using %s backend", ggml_backend_name(vae_backend)); + } + tae_backend = vae_backend; + if (tae_backend_name.length() > 0 && tae_backend_name != vae_backend_name) { + tae_backend = init_named_backend(tae_backend_name); + LOG_INFO("Tiny Autoencoder: Using %s backend", ggml_backend_name(tae_backend)); } auto create_tae = [&]() -> std::shared_ptr { if 
(sd_version_is_wan(version) || sd_version_is_qwen_image(version) || sd_version_is_anima(version)) { - return std::make_shared(vae_backend, + return std::make_shared(tae_backend, offload_params_to_cpu, tensor_storage_map, "decoder", @@ -614,7 +650,7 @@ class StableDiffusionGGML { version); } else { - auto model = std::make_shared(vae_backend, + auto model = std::make_shared(tae_backend, offload_params_to_cpu, tensor_storage_map, "decoder.layers", @@ -688,9 +724,9 @@ class StableDiffusionGGML { if (strlen(SAFE_STR(sd_ctx_params->control_net_path)) > 0) { ggml_backend_t controlnet_backend = nullptr; - if (sd_ctx_params->keep_control_net_on_cpu && !ggml_backend_is_cpu(backend)) { - LOG_DEBUG("ControlNet: Using CPU backend"); - controlnet_backend = ggml_backend_cpu_init(); + if (!control_net_backend_is_default) { + control_net_backend = init_named_backend(control_net_backend_name); + LOG_INFO("ControlNet: Using %s backend", control_net_backend_name); } else { controlnet_backend = backend; } @@ -859,7 +895,7 @@ class StableDiffusionGGML { total_params_vram_size += clip_params_mem_size + pmid_params_mem_size; } - if (ggml_backend_is_cpu(backend)) { + if (ggml_backend_is_cpu(diffusion_backend)) { total_params_ram_size += unet_params_mem_size; } else { total_params_vram_size += unet_params_mem_size; @@ -2127,9 +2163,6 @@ void sd_ctx_params_init(sd_ctx_params_t* sd_ctx_params) { sd_ctx_params->lora_apply_mode = LORA_APPLY_AUTO; sd_ctx_params->offload_params_to_cpu = false; sd_ctx_params->enable_mmap = false; - sd_ctx_params->keep_clip_on_cpu = false; - sd_ctx_params->keep_control_net_on_cpu = false; - sd_ctx_params->keep_vae_on_cpu = false; sd_ctx_params->diffusion_flash_attn = false; sd_ctx_params->circular_x = false; sd_ctx_params->circular_y = false; @@ -2143,7 +2176,7 @@ char* sd_ctx_params_to_str(const sd_ctx_params_t* sd_ctx_params) { if (!buf) return nullptr; buf[0] = '\0'; - + // TODO devices snprintf(buf + strlen(buf), 4096 - strlen(buf), "model_path: %s\n" "clip_l_path: %s\n" @@ -2167,9 +2200,6 @@ char* sd_ctx_params_to_str(const sd_ctx_params_t* sd_ctx_params) { "sampler_rng_type: %s\n" "prediction: %s\n" "offload_params_to_cpu: %s\n" - "keep_clip_on_cpu: %s\n" - "keep_control_net_on_cpu: %s\n" - "keep_vae_on_cpu: %s\n" "flash_attn: %s\n" "diffusion_flash_attn: %s\n" "circular_x: %s\n" @@ -2199,9 +2229,6 @@ char* sd_ctx_params_to_str(const sd_ctx_params_t* sd_ctx_params) { sd_rng_type_name(sd_ctx_params->sampler_rng_type), sd_prediction_name(sd_ctx_params->prediction), BOOL_STR(sd_ctx_params->offload_params_to_cpu), - BOOL_STR(sd_ctx_params->keep_clip_on_cpu), - BOOL_STR(sd_ctx_params->keep_control_net_on_cpu), - BOOL_STR(sd_ctx_params->keep_vae_on_cpu), BOOL_STR(sd_ctx_params->flash_attn), BOOL_STR(sd_ctx_params->diffusion_flash_attn), BOOL_STR(sd_ctx_params->circular_x), From 2ff3f56330b2e95a04af100fc73a64de0073eab7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Fri, 9 Jan 2026 22:59:31 +0100 Subject: [PATCH 02/23] fix build --- src/stable-diffusion.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 8202e43aa..3f1df052d 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -726,7 +726,7 @@ class StableDiffusionGGML { ggml_backend_t controlnet_backend = nullptr; if (!control_net_backend_is_default) { control_net_backend = init_named_backend(control_net_backend_name); - LOG_INFO("ControlNet: Using %s backend", control_net_backend_name); + LOG_INFO("ControlNet: Using 
%s backend", ggml_backend_name(controlnet_backend)); } else { controlnet_backend = backend; } From 7df124897e90c6e1e98944bd1028d48d17852b7e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Sun, 11 Jan 2026 20:43:16 +0100 Subject: [PATCH 03/23] show backend device description --- src/stable-diffusion.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 3f1df052d..d8f8e3085 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -181,7 +181,8 @@ class StableDiffusionGGML { // TODO: expose via C API and fill a cstr const int device_count = ggml_backend_dev_count(); for (int i = 0; i < device_count; i++) { - LOG_INFO("%s", ggml_backend_dev_name(ggml_backend_dev_get(i))); + auto dev = ggml_backend_dev_get(i); + LOG_INFO("%s (%s)", ggml_backend_dev_name(dev), ggml_backend_dev_description(dev)); } } From 6c41a6ef20240921ba2ab2b44f722d8aa7ab1cc1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Sun, 11 Jan 2026 21:36:43 +0100 Subject: [PATCH 04/23] CLI: add --list-devices arg --- examples/cli/main.cpp | 35 ++++++++++++++- examples/common/common.hpp | 14 +++++- include/stable-diffusion.h | 5 +++ src/stable-diffusion.cpp | 90 ++++++++++++++++++++++++++++++++++++-- 4 files changed, 138 insertions(+), 6 deletions(-) diff --git a/examples/cli/main.cpp b/examples/cli/main.cpp index ddb88c971..d7e2a7458 100644 --- a/examples/cli/main.cpp +++ b/examples/cli/main.cpp @@ -46,6 +46,7 @@ struct SDCliParams { bool color = false; bool normal_exit = false; + bool skip_usage = false; ArgOptions get_options() { ArgOptions options; @@ -143,7 +144,27 @@ struct SDCliParams { auto on_help_arg = [&](int argc, const char** argv, int index) { normal_exit = true; - return -1; + return VALID_BREAK_OPT; + }; + + auto on_rpc_arg = [&](int argc, const char** argv, int index) { + if (++index >= argc) { + return -1; + } + const char* rpc_device = argv[index]; + add_rpc_device(rpc_device); + return 1; + }; + + auto on_list_devices_arg = [&](int argc, const char** argv, int index) { + size_t buff_size = backend_list_size(); + char* buff = (char*)malloc(buff_size); + list_backends_to_buffer(buff, buff_size); + printf("List of available GGML devices:\nName\tDescription\n-------------------\n%s\n", buff); + free(buff); + normal_exit = true; + skip_usage = true; + return VALID_BREAK_OPT; }; options.manual_options = { @@ -159,6 +180,14 @@ struct SDCliParams { "--help", "show this help message and exit", on_help_arg}, + {"", + "--rpc", + "add a rpc device", + on_rpc_arg}, + {"", + "--list-devices", + "list available ggml compute devices", + on_list_devices_arg}, }; return options; @@ -213,7 +242,9 @@ void parse_args(int argc, const char** argv, SDCliParams& cli_params, SDContextP std::vector options_vec = {cli_params.get_options(), ctx_params.get_options(), gen_params.get_options()}; if (!parse_options(argc, argv, options_vec)) { - print_usage(argc, argv, options_vec); + if (!cli_params.skip_usage){ + print_usage(argc, argv, options_vec); + } exit(cli_params.normal_exit ? 0 : 1); } diff --git a/examples/common/common.hpp b/examples/common/common.hpp index c3df7467d..932f7aece 100644 --- a/examples/common/common.hpp +++ b/examples/common/common.hpp @@ -34,6 +34,8 @@ namespace fs = std::filesystem; #define SAFE_STR(s) ((s) ? (s) : "") #define BOOL_STR(b) ((b) ? 
"true" : "false") +#define VALID_BREAK_OPT -42 + const char* modes_str[] = { "img_gen", "vid_gen", @@ -401,16 +403,26 @@ static bool parse_options(int argc, const char** argv, const std::vector string_split(const std::string & input, char separator) +{ + std::vector parts; + size_t begin_pos = 0; + size_t separator_pos = input.find(separator); + while (separator_pos != std::string::npos) { + std::string part = input.substr(begin_pos, separator_pos - begin_pos); + parts.emplace_back(part); + begin_pos = separator_pos + 1; + separator_pos = input.find(separator, begin_pos); + } + parts.emplace_back(input.substr(begin_pos, separator_pos - begin_pos)); + return parts; +} + +static void add_rpc_devices(const std::string & servers) { + auto rpc_servers = string_split(servers, ','); + if (rpc_servers.empty()) { + throw std::invalid_argument("no RPC servers specified"); + } + ggml_backend_reg_t rpc_reg = ggml_backend_reg_by_name("RPC"); + if (!rpc_reg) { + throw std::invalid_argument("failed to find RPC backend"); + } + typedef ggml_backend_reg_t (*ggml_backend_rpc_add_server_t)(const char * endpoint); + ggml_backend_rpc_add_server_t ggml_backend_rpc_add_server_fn = (ggml_backend_rpc_add_server_t) ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_server"); + if (!ggml_backend_rpc_add_server_fn) { + throw std::invalid_argument("failed to find RPC add server function"); + } + for (const auto & server : rpc_servers) { + auto reg = ggml_backend_rpc_add_server_fn(server.c_str()); + ggml_backend_register(reg); + } +} + +void add_rpc_device(const char* servers_cstr){ + std::string servers(servers_cstr); + add_rpc_devices(servers); +} + +std::vector> list_backends_vector() { + std::vector> backends; + const int device_count = ggml_backend_dev_count(); + for (int i = 0; i < device_count; i++) { + auto dev = ggml_backend_dev_get(i); + backends.push_back({ggml_backend_dev_name(dev), ggml_backend_dev_description(dev)}); + } + return backends; +} + +SD_API size_t backend_list_size(){ + // for C API + size_t buffer_size = 0; + auto backends = list_backends_vector(); + for (auto& backend : backends) { + auto dev_name_size = backend.first.size(); + auto dev_desc_size = backend.second.size(); + buffer_size+=dev_name_size+dev_desc_size+2; // +2 for the separators + } + return buffer_size; +} + +// devices are separated by \n and name and description are separated by \t +SD_API void list_backends_to_buffer(char* buffer, size_t buffer_size) { + auto backends = list_backends_vector(); + size_t offset = 0; + for (auto& backend : backends) { + size_t name_size = backend.first.size(); + size_t desc_size = backend.second.size(); + if (offset + name_size + desc_size + 2 > buffer_size) { + break; // Not enough space in the buffer + } + memcpy(buffer + offset, backend.first.c_str(), name_size); + offset += name_size; + buffer[offset++] = '\t'; + memcpy(buffer + offset, backend.second.c_str(), desc_size); + offset += desc_size; + buffer[offset++] = '\n'; + } + if (offset < buffer_size) { + buffer[offset] = '\0'; // Ensure the buffer is null-terminated at the end + } +} + /*=============================================== StableDiffusionGGML ================================================*/ class StableDiffusionGGML { @@ -177,8 +261,8 @@ class StableDiffusionGGML { ggml_backend_free(backend); } - void list_backends() { - // TODO: expose via C API and fill a cstr + + void log_backends() { const int device_count = ggml_backend_dev_count(); for (int i = 0; i < device_count; i++) { auto dev = 
ggml_backend_dev_get(i); @@ -249,7 +333,7 @@ class StableDiffusionGGML { ggml_log_set(ggml_log_callback_default, nullptr); - list_backends(); + log_backends(); std::string default_backend_name = get_default_backend_name(); From e80ec5bde9d60b6bf1d8eb10e8eb686e6f206d7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Fri, 16 Jan 2026 00:46:44 +0100 Subject: [PATCH 05/23] null-terminate even if buffer is too small --- src/stable-diffusion.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 90ba01051..ef0ffc702 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -181,6 +181,9 @@ SD_API void list_backends_to_buffer(char* buffer, size_t buffer_size) { } if (offset < buffer_size) { buffer[offset] = '\0'; // Ensure the buffer is null-terminated at the end + } else { + LOG_WARN("Provided buffer size is too small to contain details of all devices."); + buffer[buffer_size - 1] = '\0'; // Ensure the buffer is null-terminated at the end } } From da4bc53259aea14e7908994e37c690f919bdbad7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Fri, 16 Jan 2026 01:03:37 +0100 Subject: [PATCH 06/23] move stuff to ggml_extend.cpp --- src/ggml_extend.hpp | 36 ++++++++++++++++++++++++++++++++++++ src/stable-diffusion.cpp | 36 ------------------------------------ 2 files changed, 36 insertions(+), 36 deletions(-) diff --git a/src/ggml_extend.hpp b/src/ggml_extend.hpp index 859270cbd..d6f3242c2 100644 --- a/src/ggml_extend.hpp +++ b/src/ggml_extend.hpp @@ -91,6 +91,42 @@ __STATIC_INLINE__ void ggml_log_callback_default(ggml_log_level level, const cha } } +__STATIC_INLINE__ bool backend_name_exists(std::string name) { + const int device_count = ggml_backend_dev_count(); + for (int i = 0; i < device_count; i++) { + if (name == ggml_backend_dev_name(ggml_backend_dev_get(i))) { + return true; + } + } + return false; +} + +__STATIC_INLINE__ std::string sanitize_backend_name(std::string name) { + if (name == "" || backend_name_exists(name)) { + return name; + } else { + LOG_WARN("Backend %s not found, using default backend", name.c_str()); + return ""; + } +} + +__STATIC_INLINE__ std::string get_default_backend_name() { + // should pick the same backend as ggml_backend_init_best + ggml_backend_dev_t dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_GPU); + dev = dev ? dev : ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_IGPU); + dev = dev ? 
dev : ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); + return ggml_backend_dev_name(dev); +} + +__STATIC_INLINE__ ggml_backend_t init_named_backend(std::string name = "") { + LOG_DEBUG("Initializing backend: %s", name.c_str()); + if (name.empty()) { + return ggml_backend_init_best(); + } else { + return ggml_backend_init_by_name(name.c_str(), nullptr); + } +} + static_assert(GGML_MAX_NAME >= 128, "GGML_MAX_NAME must be at least 128"); // n-mode tensor-matrix product diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index ef0ffc702..94eaac62a 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -273,42 +273,6 @@ class StableDiffusionGGML { } } - bool backend_name_exists(std::string name) { - const int device_count = ggml_backend_dev_count(); - for (int i = 0; i < device_count; i++) { - if (name == ggml_backend_dev_name(ggml_backend_dev_get(i))) { - return true; - } - } - return false; - } - - std::string sanitize_backend_name(std::string name) { - if (name == "" || backend_name_exists(name)) { - return name; - } else { - LOG_WARN("Backend %s not found, using default backend", name.c_str()); - return ""; - } - } - - std::string get_default_backend_name() { - // should pick the same backend as ggml_backend_init_best - ggml_backend_dev_t dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_GPU); - dev = dev ? dev : ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_IGPU); - dev = dev ? dev : ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); - return ggml_backend_dev_name(dev); - } - - ggml_backend_t init_named_backend(std::string name = "") { - LOG_DEBUG("Initializing backend: %s", name.c_str()); - if (name.empty()) { - return ggml_backend_init_best(); - } else { - return ggml_backend_init_by_name(name.c_str(), nullptr); - } - } - std::shared_ptr get_rng(rng_type_t rng_type) { if (rng_type == STD_DEFAULT_RNG) { return std::make_shared(); From 5a39ab9fc38d6df9319a286eed2c8fcf2b1615b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Fri, 16 Jan 2026 01:06:53 +0100 Subject: [PATCH 07/23] --upscaler-backend-device --- examples/cli/main.cpp | 3 ++- examples/common/common.hpp | 7 +++++++ include/stable-diffusion.h | 3 ++- src/upscaler.cpp | 38 +++++++++++--------------------------- 4 files changed, 22 insertions(+), 29 deletions(-) diff --git a/examples/cli/main.cpp b/examples/cli/main.cpp index d7e2a7458..93abe342c 100644 --- a/examples/cli/main.cpp +++ b/examples/cli/main.cpp @@ -829,7 +829,8 @@ int main(int argc, const char* argv[]) { ctx_params.offload_params_to_cpu, ctx_params.diffusion_conv_direct, ctx_params.n_threads, - gen_params.upscale_tile_size); + gen_params.upscale_tile_size, + ctx_params.upscaler_backend_device.c_str()); if (upscaler_ctx == nullptr) { LOG_ERROR("new_upscaler_ctx failed"); diff --git a/examples/common/common.hpp b/examples/common/common.hpp index 932f7aece..cfa9c740e 100644 --- a/examples/common/common.hpp +++ b/examples/common/common.hpp @@ -465,6 +465,8 @@ struct SDContextParams { std::string vae_backend_device; std::string tae_backend_device; std::string control_net_backend_device; + std::string upscaler_backend_device; + std::map embedding_map; std::vector embedding_vec; @@ -602,6 +604,11 @@ struct SDContextParams { "--control-net-backend-device", "device to use for control net (defaults to main-backend-device)", &control_net_backend_device}, + {"", + "--upscaler-backend-device", + "device to use for upscaling models (defaults to main-backend-device)", + &upscaler_backend_device}, + }; 
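As a usage sketch of the C API pieces this series adds (the device-listing helpers and the extra `device` parameter on new_upscaler_ctx): the snippet below is a minimal, hedged example rather than part of the diffs; the model path, thread count, tile size and the "CPU" device name are placeholders, and valid device names are whatever the list reports on the local ggml build.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>
    #include "stable-diffusion.h"

    int main(void) {
        /* Query how many bytes the device list needs, then fetch it.
           Entries are "name\tdescription\n", as described in the patch. */
        size_t n = backend_list_size();
        char* buf = (char*)malloc(n + 1);   /* +1 keeps room for the trailing NUL */
        if (buf) {
            list_backends_to_buffer(buf, n + 1);
            printf("Available devices:\n%s", buf);
            free(buf);
        }

        /* Pass one of the reported device names as the new last argument.
           Signature per this series: path, offload_params_to_cpu, direct,
           n_threads, tile_size, device. */
        upscaler_ctx_t* up = new_upscaler_ctx("esrgan.safetensors", false, false,
                                              4, 128, "CPU");
        if (up) {
            /* ... call upscale() as before ... */
            free_upscaler_ctx(up);
        }
        return 0;
    }
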
diff --git a/include/stable-diffusion.h b/include/stable-diffusion.h index 2bdef5e87..93c194103 100644 --- a/include/stable-diffusion.h +++ b/include/stable-diffusion.h @@ -395,7 +395,8 @@ SD_API upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path, bool offload_params_to_cpu, bool direct, int n_threads, - int tile_size); + int tile_size, + const char * device); SD_API void free_upscaler_ctx(upscaler_ctx_t* upscaler_ctx); SD_API sd_image_t upscale(upscaler_ctx_t* upscaler_ctx, diff --git a/src/upscaler.cpp b/src/upscaler.cpp index 03f7714e5..4d5b91548 100644 --- a/src/upscaler.cpp +++ b/src/upscaler.cpp @@ -23,37 +23,20 @@ struct UpscalerGGML { bool load_from_file(const std::string& esrgan_path, bool offload_params_to_cpu, - int n_threads) { + int n_threads, + std::string device = "") { ggml_log_set(ggml_log_callback_default, nullptr); -#ifdef SD_USE_CUDA - LOG_DEBUG("Using CUDA backend"); - backend = ggml_backend_cuda_init(0); -#endif -#ifdef SD_USE_METAL - LOG_DEBUG("Using Metal backend"); - backend = ggml_backend_metal_init(); -#endif -#ifdef SD_USE_VULKAN - LOG_DEBUG("Using Vulkan backend"); - backend = ggml_backend_vk_init(0); -#endif -#ifdef SD_USE_OPENCL - LOG_DEBUG("Using OpenCL backend"); - backend = ggml_backend_opencl_init(); -#endif -#ifdef SD_USE_SYCL - LOG_DEBUG("Using SYCL backend"); - backend = ggml_backend_sycl_init(0); -#endif + device = sanitize_backend_name(device); + backend = init_named_backend(device); ModelLoader model_loader; if (!model_loader.init_from_file_and_convert_name(esrgan_path)) { LOG_ERROR("init model loader from file failed: '%s'", esrgan_path.c_str()); } model_loader.set_wtype_override(model_data_type); - if (!backend) { - LOG_DEBUG("Using CPU backend"); - backend = ggml_backend_cpu_init(); - } + // if (!backend) { + // LOG_DEBUG("Using CPU backend"); + // backend = ggml_backend_cpu_init(); + // } LOG_INFO("Upscaler weight type: %s", ggml_type_name(model_data_type)); esrgan_upscaler = std::make_shared(backend, offload_params_to_cpu, tile_size, model_loader.get_tensor_storage_map()); if (direct) { @@ -129,7 +112,8 @@ upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path_c_str, bool offload_params_to_cpu, bool direct, int n_threads, - int tile_size) { + int tile_size, + const char* device) { upscaler_ctx_t* upscaler_ctx = (upscaler_ctx_t*)malloc(sizeof(upscaler_ctx_t)); if (upscaler_ctx == nullptr) { return nullptr; @@ -141,7 +125,7 @@ upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path_c_str, return nullptr; } - if (!upscaler_ctx->upscaler->load_from_file(esrgan_path, offload_params_to_cpu, n_threads)) { + if (!upscaler_ctx->upscaler->load_from_file(esrgan_path, offload_params_to_cpu, n_threads, SAFE_STR(device))) { delete upscaler_ctx->upscaler; upscaler_ctx->upscaler = nullptr; free(upscaler_ctx); From 00ba095d2a95a266908585c138343df0fda05249 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Fri, 16 Jan 2026 02:00:34 +0100 Subject: [PATCH 08/23] use diffusion_backend for loading LoRAs --- src/stable-diffusion.cpp | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 94eaac62a..7ec0e3cda 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -808,7 +808,7 @@ class StableDiffusionGGML { version); } if (strlen(SAFE_STR(sd_ctx_params->photo_maker_path)) > 0) { - pmid_lora = std::make_shared("pmid", backend, sd_ctx_params->photo_maker_path, "", version); + pmid_lora = std::make_shared("pmid", diffusion_backend, 
sd_ctx_params->photo_maker_path, "", version); auto lora_tensor_filter = [&](const std::string& tensor_name) { if (starts_with(tensor_name, "lora.model")) { return true; @@ -1160,8 +1160,11 @@ class StableDiffusionGGML { for (auto& kv : lora_state_diff) { int64_t t0 = ggml_time_ms(); - - auto lora = load_lora_model_from_file(kv.first, kv.second, backend); + // TODO: Fix that + if(diffusion_backend!=clip_backend && !ggml_backend_is_cpu(clip_backend)){ + LOG_WARN("Diffusion models and text encoders are running on different backends. This may cause issues when immediately applying LoRAs."); + } + auto lora = load_lora_model_from_file(kv.first, kv.second, diffusion_backend); if (!lora || lora->lora_tensors.empty()) { continue; } @@ -1251,7 +1254,7 @@ class StableDiffusionGGML { const std::string& lora_name = kv.first; float multiplier = kv.second; - auto lora = load_lora_model_from_file(lora_name, multiplier, backend, lora_tensor_filter); + auto lora = load_lora_model_from_file(lora_name, multiplier, diffusion_backend, lora_tensor_filter); if (lora && !lora->lora_tensors.empty()) { lora->preprocess_lora_tensors(tensors); diffusion_lora_models.push_back(lora); From 24b0452a302dd47891239b4d02cb634f3870dee7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Fri, 16 Jan 2026 02:19:45 +0100 Subject: [PATCH 09/23] --photomaker-backend-device (+fixes) --- examples/common/common.hpp | 7 ++++++- include/stable-diffusion.h | 1 + src/stable-diffusion.cpp | 26 +++++++++++++++++--------- 3 files changed, 24 insertions(+), 10 deletions(-) diff --git a/examples/common/common.hpp b/examples/common/common.hpp index cfa9c740e..333108ad7 100644 --- a/examples/common/common.hpp +++ b/examples/common/common.hpp @@ -466,7 +466,7 @@ struct SDContextParams { std::string tae_backend_device; std::string control_net_backend_device; std::string upscaler_backend_device; - + std::string photomaker_backend_device; std::map embedding_map; std::vector embedding_vec; @@ -608,6 +608,10 @@ struct SDContextParams { "--upscaler-backend-device", "device to use for upscaling models (defaults to main-backend-device)", &upscaler_backend_device}, + {"", + "--photomaker-backend-device", + "device to use for photomaker (defaults to main-backend-device)", + &photomaker_backend_device}, }; @@ -1021,6 +1025,7 @@ struct SDContextParams { vae_backend_device.c_str(), tae_backend_device.c_str(), control_net_backend_device.c_str(), + photomaker_backend_device.c_str(), }; return sd_ctx_params; } diff --git a/include/stable-diffusion.h b/include/stable-diffusion.h index 93c194103..920b3cb3d 100644 --- a/include/stable-diffusion.h +++ b/include/stable-diffusion.h @@ -207,6 +207,7 @@ typedef struct { const char* vae_device; const char* tae_device; const char* control_net_device; + const char* photomaker_device; } sd_ctx_params_t; typedef struct { diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 7ec0e3cda..3b08cb967 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -197,6 +197,7 @@ class StableDiffusionGGML { ggml_backend_t control_net_backend = nullptr; ggml_backend_t vae_backend = nullptr; ggml_backend_t tae_backend = nullptr; + ggml_backend_t pmid_backend = nullptr; // TODO: clip_vision and photomaker backends @@ -316,6 +317,7 @@ class StableDiffusionGGML { std::string control_net_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->control_net_device)); std::string vae_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->vae_device)); std::string tae_backend_name 
= sanitize_backend_name(SAFE_STR(sd_ctx_params->tae_device)); + std::string pmid_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->photomaker_device)); bool diffusion_backend_is_default = diffusion_backend_name.empty() || diffusion_backend_name == default_backend_name; bool clip_backend_is_default = (clip_backend_name.empty() || clip_backend_name == default_backend_name); @@ -323,9 +325,10 @@ class StableDiffusionGGML { bool vae_backend_is_default = (vae_backend_name.empty() || vae_backend_name == default_backend_name); // if tae_backend_name is empty, it will use the same backend as vae bool tae_backend_is_default = (tae_backend_name.empty() && vae_backend_is_default) || tae_backend_name == default_backend_name; + bool pmid_backend_is_default = (pmid_backend_name.empty() || pmid_backend_name == default_backend_name); // if some backend is not specified or is the same as the default backend, use the default backend - bool use_default_backend = diffusion_backend_is_default || clip_backend_is_default || control_net_backend_is_default || vae_backend_is_default || tae_backend_is_default; + bool use_default_backend = diffusion_backend_is_default || clip_backend_is_default || control_net_backend_is_default || vae_backend_is_default || tae_backend_is_default || pmid_backend_is_default; if (use_default_backend) { backend = init_named_backend(override_default_backend_name); @@ -775,14 +778,13 @@ class StableDiffusionGGML { } if (strlen(SAFE_STR(sd_ctx_params->control_net_path)) > 0) { - ggml_backend_t controlnet_backend = nullptr; if (!control_net_backend_is_default) { control_net_backend = init_named_backend(control_net_backend_name); - LOG_INFO("ControlNet: Using %s backend", ggml_backend_name(controlnet_backend)); + LOG_INFO("ControlNet: Using %s backend", ggml_backend_name(control_net_backend)); } else { - controlnet_backend = backend; + control_net_backend = backend; } - control_net = std::make_shared(controlnet_backend, + control_net = std::make_shared(control_net_backend, offload_params_to_cpu, tensor_storage_map, version); @@ -791,9 +793,15 @@ class StableDiffusionGGML { control_net->set_conv2d_direct_enabled(true); } } - + pmid_backend = backend; + if (!pmid_backend_is_default) { + pmid_backend = init_named_backend(pmid_backend_name); + LOG_INFO("PhotoMaker: Using %s backend", ggml_backend_name(pmid_backend)); + } else { + pmid_backend = backend; + } if (strstr(SAFE_STR(sd_ctx_params->photo_maker_path), "v2")) { - pmid_model = std::make_shared(backend, + pmid_model = std::make_shared(pmid_backend, offload_params_to_cpu, tensor_storage_map, "pmid", @@ -801,7 +809,7 @@ class StableDiffusionGGML { PM_VERSION_2); LOG_INFO("using PhotoMaker Version 2"); } else { - pmid_model = std::make_shared(backend, + pmid_model = std::make_shared(pmid_backend, offload_params_to_cpu, tensor_storage_map, "pmid", @@ -981,7 +989,7 @@ class StableDiffusionGGML { control_net_params_mem_size / 1024.0 / 1024.0, ggml_backend_is_cpu(control_net_backend) ? "RAM" : "VRAM", pmid_params_mem_size / 1024.0 / 1024.0, - ggml_backend_is_cpu(clip_backend) ? "RAM" : "VRAM"); + ggml_backend_is_cpu(pmid_backend) ? 
"RAM" : "VRAM"); } // init denoiser From 3a43b69e87b2ea78c76b41d5f597656d2ea53f6a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Fri, 16 Jan 2026 02:28:37 +0100 Subject: [PATCH 10/23] --vision-backend-device --- examples/common/common.hpp | 7 ++++++- include/stable-diffusion.h | 1 + src/stable-diffusion.cpp | 10 ++++++---- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/examples/common/common.hpp b/examples/common/common.hpp index 333108ad7..750dabf05 100644 --- a/examples/common/common.hpp +++ b/examples/common/common.hpp @@ -467,6 +467,7 @@ struct SDContextParams { std::string control_net_backend_device; std::string upscaler_backend_device; std::string photomaker_backend_device; + std::string vision_backend_device; std::map embedding_map; std::vector embedding_vec; @@ -612,7 +613,10 @@ struct SDContextParams { "--photomaker-backend-device", "device to use for photomaker (defaults to main-backend-device)", &photomaker_backend_device}, - + {"", + "--vision-backend-device", + "device to use for clip-vision model (defaults to clip-backend-device)", + &vision_backend_device}, }; @@ -1026,6 +1030,7 @@ struct SDContextParams { tae_backend_device.c_str(), control_net_backend_device.c_str(), photomaker_backend_device.c_str(), + vision_backend_device.c_str(), }; return sd_ctx_params; } diff --git a/include/stable-diffusion.h b/include/stable-diffusion.h index 920b3cb3d..521875034 100644 --- a/include/stable-diffusion.h +++ b/include/stable-diffusion.h @@ -208,6 +208,7 @@ typedef struct { const char* tae_device; const char* control_net_device; const char* photomaker_device; + const char* vision_device; } sd_ctx_params_t; typedef struct { diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 3b08cb967..b717d4390 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -198,8 +198,7 @@ class StableDiffusionGGML { ggml_backend_t vae_backend = nullptr; ggml_backend_t tae_backend = nullptr; ggml_backend_t pmid_backend = nullptr; - - // TODO: clip_vision and photomaker backends + ggml_backend_t vision_backend = nullptr; SDVersion version; bool vae_decode_only = false; @@ -318,6 +317,7 @@ class StableDiffusionGGML { std::string vae_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->vae_device)); std::string tae_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->tae_device)); std::string pmid_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->photomaker_device)); + std::string vision_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->vision_device)); bool diffusion_backend_is_default = diffusion_backend_name.empty() || diffusion_backend_name == default_backend_name; bool clip_backend_is_default = (clip_backend_name.empty() || clip_backend_name == default_backend_name); @@ -326,9 +326,11 @@ class StableDiffusionGGML { // if tae_backend_name is empty, it will use the same backend as vae bool tae_backend_is_default = (tae_backend_name.empty() && vae_backend_is_default) || tae_backend_name == default_backend_name; bool pmid_backend_is_default = (pmid_backend_name.empty() || pmid_backend_name == default_backend_name); + // if vision_backend_name is empty, it will use the same backend as clip + bool vision_backend_is_default = (vision_backend_name.empty() && clip_backend_is_default) || vision_backend_name == default_backend_name; // if some backend is not specified or is the same as the default backend, use the default backend - bool use_default_backend = diffusion_backend_is_default || 
clip_backend_is_default || control_net_backend_is_default || vae_backend_is_default || tae_backend_is_default || pmid_backend_is_default; + bool use_default_backend = diffusion_backend_is_default || clip_backend_is_default || control_net_backend_is_default || vae_backend_is_default || tae_backend_is_default || pmid_backend_is_default || vision_backend_is_default; if (use_default_backend) { backend = init_named_backend(override_default_backend_name); @@ -597,7 +599,7 @@ class StableDiffusionGGML { if (diffusion_model->get_desc() == "Wan2.1-I2V-14B" || diffusion_model->get_desc() == "Wan2.1-FLF2V-14B" || diffusion_model->get_desc() == "Wan2.1-I2V-1.3B") { - clip_vision = std::make_shared(backend, + clip_vision = std::make_shared(vision_backend, offload_params_to_cpu, tensor_storage_map); clip_vision->alloc_params_buffer(); From 720dd89f1b81fe59ec8d8d59b58987a42d38ba44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Wed, 21 Jan 2026 20:37:34 +0100 Subject: [PATCH 11/23] check backends at runtime --- src/common_block.hpp | 8 +++++--- src/ggml_extend.hpp | 28 ++++++++-------------------- src/model.cpp | 12 ------------ src/qwen_image.hpp | 9 ++++++--- src/z_image.hpp | 19 ++++++++++++------- 5 files changed, 31 insertions(+), 45 deletions(-) diff --git a/src/common_block.hpp b/src/common_block.hpp index 2cef389af..476c9499a 100644 --- a/src/common_block.hpp +++ b/src/common_block.hpp @@ -248,9 +248,6 @@ class FeedForward : public GGMLBlock { float scale = 1.f; if (precision_fix) { scale = 1.f / 128.f; -#ifdef SD_USE_VULKAN - force_prec_f32 = true; -#endif } // The purpose of the scale here is to prevent NaN issues in certain situations. // For example, when using Vulkan without enabling force_prec_f32, @@ -264,6 +261,11 @@ class FeedForward : public GGMLBlock { auto net_0 = std::dynamic_pointer_cast(blocks["net.0"]); auto net_2 = std::dynamic_pointer_cast(blocks["net.2"]); + #ifdef SD_USE_VULKAN + if(ggml_backend_is_vk(ctx->backend)){ + net_2->set_force_prec_32(true); + } + #endif x = net_0->forward(ctx, x); // [ne3, ne2, ne1, inner_dim] x = net_2->forward(ctx, x); // [ne3, ne2, ne1, dim_out] diff --git a/src/ggml_extend.hpp b/src/ggml_extend.hpp index d6f3242c2..2bc1ad48a 100644 --- a/src/ggml_extend.hpp +++ b/src/ggml_extend.hpp @@ -30,26 +30,6 @@ #include "model.h" #include "tensor.hpp" -#ifdef SD_USE_CUDA -#include "ggml-cuda.h" -#endif - -#ifdef SD_USE_METAL -#include "ggml-metal.h" -#endif - -#ifdef SD_USE_VULKAN -#include "ggml-vulkan.h" -#endif - -#ifdef SD_USE_OPENCL -#include "ggml-opencl.h" -#endif - -#ifdef SD_USE_SYCL -#include "ggml-sycl.h" -#endif - #include "rng.hpp" #include "tensor_ggml.hpp" #include "util.h" @@ -2372,6 +2352,14 @@ class Linear : public UnaryBlock { force_prec_f32(force_prec_f32), scale(scale) {} + void set_scale(float scale_){ + scale = scale_; + } + + void set_force_prec_32(bool force_prec_f32_){ + force_prec_f32 = force_prec_f32_; + } + ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) { ggml_tensor* w = params["weight"]; ggml_tensor* b = nullptr; diff --git a/src/model.cpp b/src/model.cpp index 2c708ed6a..37f52c682 100644 --- a/src/model.cpp +++ b/src/model.cpp @@ -25,18 +25,6 @@ #include "name_conversion.h" #include "stable-diffusion.h" -#ifdef SD_USE_METAL -#include "ggml-metal.h" -#endif - -#ifdef SD_USE_VULKAN -#include "ggml-vulkan.h" -#endif - -#ifdef SD_USE_OPENCL -#include "ggml-opencl.h" -#endif - #define ST_HEADER_SIZE_LEN 8 uint64_t read_u64(uint8_t* buffer) { diff --git a/src/qwen_image.hpp 
b/src/qwen_image.hpp index 83c8cec66..fcf8df305 100644 --- a/src/qwen_image.hpp +++ b/src/qwen_image.hpp @@ -95,9 +95,7 @@ namespace Qwen { float scale = 1.f / 32.f; bool force_prec_f32 = false; -#ifdef SD_USE_VULKAN - force_prec_f32 = true; -#endif + // The purpose of the scale here is to prevent NaN issues in certain situations. // For example when using CUDA but the weights are k-quants (not all prompts). blocks["to_out.0"] = std::shared_ptr(new Linear(inner_dim, out_dim, out_bias, false, force_prec_f32, scale)); @@ -123,6 +121,11 @@ namespace Qwen { auto to_k = std::dynamic_pointer_cast(blocks["to_k"]); auto to_v = std::dynamic_pointer_cast(blocks["to_v"]); auto to_out_0 = std::dynamic_pointer_cast(blocks["to_out.0"]); +#ifdef SD_USE_VULKAN + if(ggml_backend_is_vk(ctx->backend)){ + to_out_0->set_force_prec_32(true); + } +#endif auto norm_added_q = std::dynamic_pointer_cast(blocks["norm_added_q"]); auto norm_added_k = std::dynamic_pointer_cast(blocks["norm_added_k"]); diff --git a/src/z_image.hpp b/src/z_image.hpp index 363ce5f4f..a29ede990 100644 --- a/src/z_image.hpp +++ b/src/z_image.hpp @@ -31,10 +31,6 @@ namespace ZImage { : head_dim(head_dim), num_heads(num_heads), num_kv_heads(num_kv_heads), qk_norm(qk_norm) { blocks["qkv"] = std::make_shared(hidden_size, (num_heads + num_kv_heads * 2) * head_dim, false); float scale = 1.f; -#if GGML_USE_HIP - // Prevent NaN issues with certain ROCm setups - scale = 1.f / 16.f; -#endif blocks["out"] = std::make_shared(num_heads * head_dim, hidden_size, false, false, false, scale); if (qk_norm) { blocks["q_norm"] = std::make_shared(head_dim); @@ -51,6 +47,12 @@ namespace ZImage { int64_t N = x->ne[2]; auto qkv_proj = std::dynamic_pointer_cast(blocks["qkv"]); auto out_proj = std::dynamic_pointer_cast(blocks["out"]); +#if GGML_USE_HIP + // Prevent NaN issues with certain ROCm setups + if (ggml_backend_is_cuda(ctx->backend)) { + out_proj->set_scale(1.f / 16.f); + } +#endif auto qkv = qkv_proj->forward(ctx, x); // [N, n_token, (num_heads + num_kv_heads*2)*head_dim] qkv = ggml_reshape_4d(ctx->ggml_ctx, qkv, head_dim, num_heads + num_kv_heads * 2, qkv->ne[1], qkv->ne[2]); // [N, n_token, num_heads + num_kv_heads*2, head_dim] @@ -115,9 +117,7 @@ namespace ZImage { bool force_prec_f32 = false; float scale = 1.f / 128.f; -#ifdef SD_USE_VULKAN - force_prec_f32 = true; -#endif + // The purpose of the scale here is to prevent NaN issues in certain situations. // For example, when using CUDA but the weights are k-quants. 
blocks["w2"] = std::make_shared(hidden_dim, dim, false, false, force_prec_f32, scale); @@ -128,6 +128,11 @@ namespace ZImage { auto w1 = std::dynamic_pointer_cast(blocks["w1"]); auto w2 = std::dynamic_pointer_cast(blocks["w2"]); auto w3 = std::dynamic_pointer_cast(blocks["w3"]); +#ifdef SD_USE_VULKAN + if(ggml_backend_is_vk(ctx->backend)){ + w2->set_force_prec_32(true); + } +#endif auto x1 = w1->forward(ctx, x); auto x3 = w3->forward(ctx, x); From c59eb261f34875afe0b614a5481705ad89ec5e14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Wed, 21 Jan 2026 20:51:26 +0100 Subject: [PATCH 12/23] fix missing includes --- src/common_block.hpp | 4 ++++ src/qwen_image.hpp | 4 ++++ src/z_image.hpp | 8 ++++++++ 3 files changed, 16 insertions(+) diff --git a/src/common_block.hpp b/src/common_block.hpp index 476c9499a..deddb6165 100644 --- a/src/common_block.hpp +++ b/src/common_block.hpp @@ -3,6 +3,10 @@ #include "ggml_extend.hpp" +#ifdef SD_USE_VULKAN +#include "ggml-vulkan.h" +#endif + class DownSampleBlock : public GGMLBlock { protected: int channels; diff --git a/src/qwen_image.hpp b/src/qwen_image.hpp index fcf8df305..518112b83 100644 --- a/src/qwen_image.hpp +++ b/src/qwen_image.hpp @@ -6,6 +6,10 @@ #include "common_block.hpp" #include "flux.hpp" +#ifdef SD_USE_VULKAN +#include "ggml-vulkan.h" +#endif + namespace Qwen { constexpr int QWEN_IMAGE_GRAPH_SIZE = 20480; diff --git a/src/z_image.hpp b/src/z_image.hpp index a29ede990..0e30652b3 100644 --- a/src/z_image.hpp +++ b/src/z_image.hpp @@ -7,6 +7,14 @@ #include "ggml_extend.hpp" #include "mmdit.hpp" +#ifdef SD_USE_VULKAN +#include "ggml-vulkan.h" +#endif + +#if GGML_USE_HIP +#include "ggml-cuda.h" +#endif + // Ref: https://github.com/Alpha-VLLM/Lumina-Image-2.0/blob/main/models/model.py // Ref: https://github.com/huggingface/diffusers/pull/12703 From 177c42e3977488fbc820eeef70680c29addb4256 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Fri, 23 Jan 2026 12:04:25 +0100 Subject: [PATCH 13/23] fix typo --- src/common_block.hpp | 2 +- src/ggml_extend.hpp | 2 +- src/qwen_image.hpp | 2 +- src/z_image.hpp | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/common_block.hpp b/src/common_block.hpp index deddb6165..a25fe9311 100644 --- a/src/common_block.hpp +++ b/src/common_block.hpp @@ -267,7 +267,7 @@ class FeedForward : public GGMLBlock { auto net_2 = std::dynamic_pointer_cast(blocks["net.2"]); #ifdef SD_USE_VULKAN if(ggml_backend_is_vk(ctx->backend)){ - net_2->set_force_prec_32(true); + net_2->set_force_prec_f32(true); } #endif diff --git a/src/ggml_extend.hpp b/src/ggml_extend.hpp index 2bc1ad48a..9c1a27f68 100644 --- a/src/ggml_extend.hpp +++ b/src/ggml_extend.hpp @@ -2356,7 +2356,7 @@ class Linear : public UnaryBlock { scale = scale_; } - void set_force_prec_32(bool force_prec_f32_){ + void set_force_prec_f32(bool force_prec_f32_){ force_prec_f32 = force_prec_f32_; } diff --git a/src/qwen_image.hpp b/src/qwen_image.hpp index 518112b83..3ee370b08 100644 --- a/src/qwen_image.hpp +++ b/src/qwen_image.hpp @@ -127,7 +127,7 @@ namespace Qwen { auto to_out_0 = std::dynamic_pointer_cast(blocks["to_out.0"]); #ifdef SD_USE_VULKAN if(ggml_backend_is_vk(ctx->backend)){ - to_out_0->set_force_prec_32(true); + to_out_0->set_force_prec_f32(true); } #endif diff --git a/src/z_image.hpp b/src/z_image.hpp index 0e30652b3..6b41011e0 100644 --- a/src/z_image.hpp +++ b/src/z_image.hpp @@ -138,7 +138,7 @@ namespace ZImage { auto w3 = std::dynamic_pointer_cast(blocks["w3"]); #ifdef SD_USE_VULKAN 
if(ggml_backend_is_vk(ctx->backend)){ - w2->set_force_prec_32(true); + w2->set_force_prec_f32(true); } #endif From 67b53dafee417435af1cbe024c8e5461946f6914 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Sun, 25 Jan 2026 18:31:54 +0100 Subject: [PATCH 14/23] multiple clip backend devices fix sdxl conditionner backends fix sd3 backend display --- examples/common/common.hpp | 2 +- src/conditioner.hpp | 76 ++++++++++++++++++++++++----- src/stable-diffusion.cpp | 99 +++++++++++++++++++++++++++----------- 3 files changed, 137 insertions(+), 40 deletions(-) diff --git a/examples/common/common.hpp b/examples/common/common.hpp index 750dabf05..44ecc6c10 100644 --- a/examples/common/common.hpp +++ b/examples/common/common.hpp @@ -615,7 +615,7 @@ struct SDContextParams { &photomaker_backend_device}, {"", "--vision-backend-device", - "device to use for clip-vision model (defaults to clip-backend-device)", + "device to use for clip-vision model (defaults to main-backend-device)", &vision_backend_device}, }; diff --git a/src/conditioner.hpp b/src/conditioner.hpp index 05167cfd2..610039927 100644 --- a/src/conditioner.hpp +++ b/src/conditioner.hpp @@ -4,9 +4,12 @@ #include #include "clip.hpp" +#include "ggml-alloc.h" +#include "ggml-backend.h" #include "llm.hpp" #include "t5.hpp" #include "tensor_ggml.hpp" +#include "util.h" struct SDCondition { sd::Tensor c_crossattn; @@ -112,7 +115,7 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { std::vector token_embed_custom; std::map> embedding_pos_map; - FrozenCLIPEmbedderWithCustomWords(ggml_backend_t backend, + FrozenCLIPEmbedderWithCustomWords(std::vector backends, bool offload_params_to_cpu, const String2TensorStorage& tensor_storage_map, const std::map& orig_embedding_map, @@ -126,13 +129,27 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { tokenizer.add_special_token(name); } bool force_clip_f32 = !embedding_map.empty(); + + ggml_backend_t clip_backend = backends[0]; + if (sd_version_is_sd1(version)) { - text_model = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, true, force_clip_f32); + LOG_INFO("CLIP-L: using %s backend", ggml_backend_name(clip_backend)); + text_model = std::make_shared(clip_backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, true, force_clip_f32); } else if (sd_version_is_sd2(version)) { - text_model = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.transformer.text_model", OPEN_CLIP_VIT_H_14, true, force_clip_f32); + LOG_INFO("CLIP-H: using %s backend", ggml_backend_name(clip_backend)); + text_model = std::make_shared(clip_backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.transformer.text_model", OPEN_CLIP_VIT_H_14, true, force_clip_f32); } else if (sd_version_is_sdxl(version)) { - text_model = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, false, force_clip_f32); - text_model2 = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.1.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, false, force_clip_f32); + ggml_backend_t clip_g_backend = clip_backend; + if (backends.size() >= 2){ + clip_g_backend = backends[1]; + if (backends.size() > 2) { + LOG_WARN("More than 2 clip backends provided, but the model only supports 2 text encoders. 
Ignoring the rest."); + } + } + LOG_INFO("CLIP-L: using %s backend", ggml_backend_name(clip_backend)); + LOG_INFO("CLIP-G: using %s backend", ggml_backend_name(clip_g_backend)); + text_model = std::make_shared(clip_backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, false, force_clip_f32); + text_model2 = std::make_shared(clip_g_backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.1.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, false, force_clip_f32); } } @@ -716,13 +733,29 @@ struct SD3CLIPEmbedder : public Conditioner { std::shared_ptr clip_g; std::shared_ptr t5; - SD3CLIPEmbedder(ggml_backend_t backend, + SD3CLIPEmbedder(std::vector backends, bool offload_params_to_cpu, const String2TensorStorage& tensor_storage_map = {}) : clip_g_tokenizer(0) { bool use_clip_l = false; bool use_clip_g = false; bool use_t5 = false; + + ggml_backend_t clip_l_backend, clip_g_backend, t5_backend; + if (backends.size() == 1) { + clip_l_backend = clip_g_backend = t5_backend = backends[0]; + } else if (backends.size() == 2) { + clip_l_backend = clip_g_backend = backends[0]; + t5_backend = backends[1]; + } else if (backends.size() >= 3) { + clip_l_backend = backends[0]; + clip_g_backend = backends[1]; + t5_backend = backends[2]; + if (backends.size() > 3) { + LOG_WARN("More than 3 clip backends provided, but the model only supports 3 text encoders. Ignoring the rest."); + } + } + for (auto pair : tensor_storage_map) { if (pair.first.find("text_encoders.clip_l") != std::string::npos) { use_clip_l = true; @@ -737,13 +770,16 @@ struct SD3CLIPEmbedder : public Conditioner { return; } if (use_clip_l) { - clip_l = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, false); + LOG_INFO("CLIP-L: using %s backend", ggml_backend_name(clip_l_backend)); + clip_l = std::make_shared(clip_l_backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, false); } if (use_clip_g) { - clip_g = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.clip_g.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, false); + LOG_INFO("CLIP-G: using %s backend", ggml_backend_name(clip_g_backend)); + clip_g = std::make_shared(clip_g_backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.clip_g.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, false); } if (use_t5) { - t5 = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.t5xxl.transformer"); + LOG_INFO("T5-XXL: using %s backend", ggml_backend_name(t5_backend)); + t5 = std::make_shared(t5_backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.t5xxl.transformer"); } } @@ -1071,11 +1107,25 @@ struct FluxCLIPEmbedder : public Conditioner { std::shared_ptr t5; size_t chunk_len = 256; - FluxCLIPEmbedder(ggml_backend_t backend, + FluxCLIPEmbedder(std::vector backends, bool offload_params_to_cpu, const String2TensorStorage& tensor_storage_map = {}) { bool use_clip_l = false; bool use_t5 = false; + + + ggml_backend_t clip_l_backend, t5_backend; + if (backends.size() == 1) { + clip_l_backend = t5_backend = backends[0]; + } else if (backends.size() >= 2) { + clip_l_backend = backends[0]; + t5_backend = backends[1]; + if (backends.size() > 2) { + LOG_WARN("More than 2 clip backends provided, but the model only supports 2 text encoders. 
Ignoring the rest."); + } + } + + for (auto pair : tensor_storage_map) { if (pair.first.find("text_encoders.clip_l") != std::string::npos) { use_clip_l = true; @@ -1090,12 +1140,14 @@ struct FluxCLIPEmbedder : public Conditioner { } if (use_clip_l) { - clip_l = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, true); + LOG_INFO("CLIP-L: using %s backend", ggml_backend_name(clip_l_backend)); + clip_l = std::make_shared(clip_l_backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, true); } else { LOG_WARN("clip_l text encoder not found! Prompt adherence might be degraded."); } if (use_t5) { - t5 = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.t5xxl.transformer"); + LOG_INFO("T5-XXL: using %s backend", ggml_backend_name(clip_l_backend)); + t5 = std::make_shared(t5_backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.t5xxl.transformer"); } else { LOG_WARN("t5xxl text encoder not found! Prompt adherence might be degraded."); } diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index b717d4390..5a8226e9e 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -1,3 +1,4 @@ +#include "ggml-cpu.h" #include "ggml_extend.hpp" #include "model.h" @@ -5,6 +6,7 @@ #include "rng_mt19937.hpp" #include "rng_philox.hpp" #include "stable-diffusion.h" +#include #include "util.h" #include "auto_encoder_kl.hpp" @@ -140,6 +142,29 @@ void add_rpc_device(const char* servers_cstr){ add_rpc_devices(servers); } +std::vector sanitize_backend_name_list(std::string name) { + std::vector vec = {}; + if (name == "" || backend_name_exists(name)) { + // single backend + vec.push_back(name); + } else if (name.find(",") != std::string::npos) { + // comma-separated backend names + std::stringstream ss(name); + std::string token; + while (std::getline(ss, token, ',')) { + if (token == "" || backend_name_exists(token)) { + vec.push_back(token); + } else { + LOG_WARN("backend name %s not found, using default", token.c_str()); + vec.push_back(""); + } + } + } else { + vec.push_back(""); + } + return vec; +} + std::vector> list_backends_vector() { std::vector> backends; const int device_count = ggml_backend_dev_count(); @@ -193,13 +218,14 @@ class StableDiffusionGGML { public: ggml_backend_t backend = nullptr; // general backend ggml_backend_t diffusion_backend = nullptr; - ggml_backend_t clip_backend = nullptr; ggml_backend_t control_net_backend = nullptr; ggml_backend_t vae_backend = nullptr; ggml_backend_t tae_backend = nullptr; ggml_backend_t pmid_backend = nullptr; ggml_backend_t vision_backend = nullptr; + std::vector clip_backends = {nullptr}; + SDVersion version; bool vae_decode_only = false; bool external_vae_is_invalid = false; @@ -249,8 +275,10 @@ class StableDiffusionGGML { if (diffusion_backend != backend) { ggml_backend_free(diffusion_backend); } - if (clip_backend != backend) { - ggml_backend_free(clip_backend); + for(auto clip_backend : clip_backends) { + if (clip_backend != backend) { + ggml_backend_free(clip_backend); + } } if (control_net_backend != backend) { ggml_backend_free(control_net_backend); @@ -312,7 +340,7 @@ class StableDiffusionGGML { } std::string diffusion_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->diffusion_device)); - std::string clip_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->clip_device)); + std::vector clip_backend_names = 
sanitize_backend_name_list(SAFE_STR(sd_ctx_params->clip_device)); std::string control_net_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->control_net_device)); std::string vae_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->vae_device)); std::string tae_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->tae_device)); @@ -320,17 +348,22 @@ class StableDiffusionGGML { std::string vision_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->vision_device)); bool diffusion_backend_is_default = diffusion_backend_name.empty() || diffusion_backend_name == default_backend_name; - bool clip_backend_is_default = (clip_backend_name.empty() || clip_backend_name == default_backend_name); + bool clip_backends_are_default = true; + for (const auto& clip_backend_name : clip_backend_names) { + if (!clip_backend_name.empty() && clip_backend_name != default_backend_name) { + clip_backends_are_default = false; + break; + } + } bool control_net_backend_is_default = (control_net_backend_name.empty() || control_net_backend_name == default_backend_name); bool vae_backend_is_default = (vae_backend_name.empty() || vae_backend_name == default_backend_name); // if tae_backend_name is empty, it will use the same backend as vae bool tae_backend_is_default = (tae_backend_name.empty() && vae_backend_is_default) || tae_backend_name == default_backend_name; bool pmid_backend_is_default = (pmid_backend_name.empty() || pmid_backend_name == default_backend_name); - // if vision_backend_name is empty, it will use the same backend as clip - bool vision_backend_is_default = (vision_backend_name.empty() && clip_backend_is_default) || vision_backend_name == default_backend_name; + bool vision_backend_is_default = (vision_backend_name.empty() || vision_backend_name == default_backend_name); // if some backend is not specified or is the same as the default backend, use the default backend - bool use_default_backend = diffusion_backend_is_default || clip_backend_is_default || control_net_backend_is_default || vae_backend_is_default || tae_backend_is_default || pmid_backend_is_default || vision_backend_is_default; + bool use_default_backend = diffusion_backend_is_default || clip_backends_are_default || control_net_backend_is_default || vae_backend_is_default || tae_backend_is_default || pmid_backend_is_default || vision_backend_is_default; if (use_default_backend) { backend = init_named_backend(override_default_backend_name); @@ -514,13 +547,18 @@ class StableDiffusionGGML { } { - clip_backend = backend; - if (!clip_backend_is_default) { - clip_backend = init_named_backend(clip_backend_name); - LOG_INFO("CLIP: Using %s backend", ggml_backend_name(clip_backend)); + if (!clip_backends_are_default) { + clip_backends.clear(); + for(auto clip_backend_name : clip_backend_names){ + auto clip_backend = init_named_backend(clip_backend_name); + LOG_INFO("CLIP: Using %s backend", ggml_backend_name(clip_backend)); + clip_backends.push_back(clip_backend); + } + }else{ + clip_backends = {backend}; } if (sd_version_is_sd3(version)) { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends, offload_params_to_cpu, tensor_storage_map); diffusion_model = std::make_shared(diffusion_backend, @@ -544,20 +582,20 @@ class StableDiffusionGGML { "--chroma-disable-dit-mask as a workaround."); } - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, tensor_storage_map, 
sd_ctx_params->chroma_use_t5_mask, sd_ctx_params->chroma_t5_mask_pad); } else if (version == VERSION_OVIS_IMAGE) { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, tensor_storage_map, version, "", false); } else { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends, offload_params_to_cpu, tensor_storage_map); } @@ -568,7 +606,7 @@ class StableDiffusionGGML { sd_ctx_params->chroma_use_dit_mask); } else if (sd_version_is_flux2(version)) { bool is_chroma = false; - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, tensor_storage_map, version); @@ -578,7 +616,7 @@ class StableDiffusionGGML { version, sd_ctx_params->chroma_use_dit_mask); } else if (sd_version_is_wan(version)) { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, tensor_storage_map, true, @@ -610,7 +648,7 @@ class StableDiffusionGGML { if (!vae_decode_only) { enable_vision = true; } - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, tensor_storage_map, version, @@ -631,7 +669,7 @@ class StableDiffusionGGML { tensor_storage_map, "model.diffusion_model"); } else if (sd_version_is_z_image(version)) { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, tensor_storage_map, version); @@ -646,14 +684,14 @@ class StableDiffusionGGML { embbeding_map.emplace(SAFE_STR(sd_ctx_params->embeddings[i].name), SAFE_STR(sd_ctx_params->embeddings[i].path)); } if (strstr(SAFE_STR(sd_ctx_params->photo_maker_path), "v2")) { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends, offload_params_to_cpu, tensor_storage_map, embbeding_map, version, PM_VERSION_2); } else { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends, offload_params_to_cpu, tensor_storage_map, embbeding_map, @@ -951,7 +989,9 @@ class StableDiffusionGGML { size_t total_params_ram_size = 0; size_t total_params_vram_size = 0; - if (ggml_backend_is_cpu(clip_backend)) { + + // TODO: split by individual text encoders + if (ggml_backend_is_cpu(clip_backends[0])) { total_params_ram_size += clip_params_mem_size + pmid_params_mem_size; } else { total_params_vram_size += clip_params_mem_size + pmid_params_mem_size; @@ -983,7 +1023,8 @@ class StableDiffusionGGML { total_params_vram_size / 1024.0 / 1024.0, total_params_ram_size / 1024.0 / 1024.0, clip_params_mem_size / 1024.0 / 1024.0, - ggml_backend_is_cpu(clip_backend) ? "RAM" : "VRAM", + // TODO: split + ggml_backend_is_cpu(clip_backends[0]) ? "RAM" : "VRAM", unet_params_mem_size / 1024.0 / 1024.0, ggml_backend_is_cpu(backend) ? 
"RAM" : "VRAM", vae_params_mem_size / 1024.0 / 1024.0, @@ -1171,7 +1212,11 @@ class StableDiffusionGGML { for (auto& kv : lora_state_diff) { int64_t t0 = ggml_time_ms(); // TODO: Fix that - if(diffusion_backend!=clip_backend && !ggml_backend_is_cpu(clip_backend)){ + bool are_clip_backends_compatible = true; + for (auto backend: clip_backends){ + are_clip_backends_compatible = are_clip_backends_compatible && (diffusion_backend==backend || ggml_backend_is_cpu(backend)); + } + if(!are_clip_backends_compatible){ LOG_WARN("Diffusion models and text encoders are running on different backends. This may cause issues when immediately applying LoRAs."); } auto lora = load_lora_model_from_file(kv.first, kv.second, diffusion_backend); @@ -1231,8 +1276,8 @@ class StableDiffusionGGML { for (auto& kv : lora_state_diff) { const std::string& lora_id = kv.first; float multiplier = kv.second; - - auto lora = load_lora_model_from_file(lora_id, multiplier, clip_backend, lora_tensor_filter); + //TODO: split by model + auto lora = load_lora_model_from_file(lora_id, multiplier, clip_backends[0], lora_tensor_filter); if (lora && !lora->lora_tensors.empty()) { lora->preprocess_lora_tensors(tensors); cond_stage_lora_models.push_back(lora); From da8de1a5cbb930dd8481fd7241b3d7c289c6fa6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Wed, 28 Jan 2026 18:48:03 +0100 Subject: [PATCH 15/23] update help message --- examples/common/common.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/common/common.hpp b/examples/common/common.hpp index 44ecc6c10..66a6e6719 100644 --- a/examples/common/common.hpp +++ b/examples/common/common.hpp @@ -591,7 +591,7 @@ struct SDContextParams { &diffusion_backend_device}, {"", "--clip-backend-device", - "device to use for clip (defaults to main-backend-device)", + "device to use for clip (defaults to main-backend-device). Can be a comma-separated list of devices for models with multiple encoders", &clip_backend_device}, {"", "--vae-backend-device", From e7da04fb1754e5f3e3890716384dbe5fb1024315 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Wed, 28 Jan 2026 20:44:09 +0100 Subject: [PATCH 16/23] Add RPC documentation --- docs/rpc.md | 202 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 202 insertions(+) create mode 100644 docs/rpc.md diff --git a/docs/rpc.md b/docs/rpc.md new file mode 100644 index 000000000..0a9b7ccd2 --- /dev/null +++ b/docs/rpc.md @@ -0,0 +1,202 @@ +# Building and Using the RPC Server with `stable-diffusion.cpp` + +This guide covers how to build a version of the RPC server from `llama.cpp` that is compatible with your version of `stable-diffusion.cpp` to manage multi-backends setups. RPC allows you to offload specific model components to a remote server. + +> **Note on Model Location:** The model files (e.g., `.safetensors` or `.gguf`) remain on the **Client** machine. The client parses the file and transmits the necessary tensor data and computational graphs to the server. The server does not need to store the model files locally. + +## 1. Building `stable-diffusion.cpp` with RPC client + +First, you should build the client application from source. It requires `GGML_RPC=ON` to include the RPC backend to your client. +```bash +mkdir build +cd build +cmake .. \ + -DGGML_RPC=ON \ + # Add other build flags here (e.g., -DSD_VULKAN=ON) +cmake --build . 
--config Release -j $(nproc) +``` + +> **Note:** Ensure you add the other flags you would normally use (e.g., `-DSD_VULKAN=ON`, `-DSD_CUDA=ON`, `-DSD_HIPBLAS=ON`, or `-DGGML_METAL=ON`), for more information about building `stable-diffusion.cpp` from source, please refer to the `build.md` documentation. + +## 2. Ensure `llama.cpp` is at the correct commit + +`stable-diffusion.cpp`'s RPC client is designed to work with a specific version of `llama.cpp` (compatible with the `ggml` submodule) to ensure API compatibility. The commit hash for `llama.cpp` is stored in `ggml/scripts/sync-llama.last`. + +> **Start from Root:** Perform these steps from the root of your `stable-diffusion.cpp` directory. + +1. Read the target commit hash from the submodule tracker: + ```bash + # Linux / WSL / MacOS + HASH=$(cat ggml/scripts/sync-llama.last) + + # Windows (PowerShell) + $HASH = Get-Content -Path "ggml\scripts\sync-llama.last" + ``` + +2. Clone `llama.cpp` at the target commit . + ```bash + git clone https://github.com/ggml-org/llama.cpp.git + cd llama.cpp + git checkout $HASH + ``` + +To save on download time and storage, you can use a shallow clone to download only the target commit: + ```bash + mkdir -p llama.cpp + cd llama.cpp + git init + git remote add origin https://github.com/ggml-org/llama.cpp.git + git fetch --depth 1 origin $HASH + git checkout FETCH_HEAD + ``` + +## 3. Build `llama.cpp` (RPC Server) + +The RPC server acts as the worker. You must explicitly enable the **backend** (the hardware interface, such as CUDA for Nvidia, Metal for Apple Silicon, or Vulkan) when building, otherwise the server will default to using only the CPU. + +To find the correct flags, refer to the official documentation for the `llama.cpp` repository. + +> **Crucial:** You must include the compiler flags required to satisfy the API compatibility with `stable-diffusion.cpp` (`-DGGML_MAX_NAME=128`). Without this flag, `GGML_MAX_NAME` will default to `64` for the server, and data transfers between the client and server will fail. Of course, `-DGGML_RPC` must also be enabled. +> +> I recommend disabling the `LLAMA_CURL` flag to avoid unnecessary dependencies, and disabling shared library builds to avoid potential conflicts. + +> **Build Target:** We are specifically building the `rpc-server` target. This prevents the build system from compiling the entire `llama.cpp` suite (like `llama-cli`), making the build significantly faster. + +### Linux / WSL (Vulkan) +```bash +mkdir build +cd build +cmake .. -DGGML_RPC=ON \ + -DGGML_VULKAN=ON \ # Ensure backend is enabled + -DGGML_BUILD_SHARED_LIBS=OFF \ + -DLLAMA_CURL=OFF \ + -DCMAKE_C_FLAGS=-DGGML_MAX_NAME=128 \ + -DCMAKE_CXX_FLAGS=-DGGML_MAX_NAME=128 +cmake --build . --config Release --target rpc-server -j $(nproc) +``` + +### macOS (Metal) +```bash +mkdir build +cd build +cmake .. -DGGML_RPC=ON \ + -DGGML_METAL=ON \ + -DGGML_BUILD_SHARED_LIBS=OFF \ + -DLLAMA_CURL=OFF \ + -DCMAKE_C_FLAGS=-DGGML_MAX_NAME=128 \ + -DCMAKE_CXX_FLAGS=-DGGML_MAX_NAME=128 +cmake --build . --config Release --target rpc-server +``` + +### Windows (Visual Studio 2022, Vulkan) +```powershell +mkdir build +cd build +cmake .. -G "Visual Studio 17 2022" -A x64 ` + -DGGML_RPC=ON ` + -DGGML_VULKAN=ON ` + -DGGML_BUILD_SHARED_LIBS=OFF ` + -DLLAMA_CURL=OFF ` + -DCMAKE_C_FLAGS=-DGGML_MAX_NAME=128 ` + -DCMAKE_CXX_FLAGS=-DGGML_MAX_NAME=128 +cmake --build . --config Release --target rpc-server +``` + +## 4. 
Usage + +Once both applications are built, you can run the server and the client to manage your GPU allocation. + +### Step A: Run the RPC Server + +Start the server. It listens for connections on the default address (usually `localhost:50052`). If your server is on a different machine, ensure the server binds to the correct interface and your firewall allows the connection. + +**On the Server :** +If running on the same machine, you can use the default address: +```bash +./rpc-server +``` +If you want to allow connections from other machines on the network: +```bash +./rpc-server --host 0.0.0.0 +``` + +> **Security Warning:** The RPC server does not currently support authentication or encryption. **Only run the server on trusted local networks**. Never expose the RPC server directly to the open internet. + +> **Drivers & Hardware:** Ensure the Server machine has the necessary drivers installed and functional (e.g., Nvidia Drivers for CUDA, Vulkan SDK, or Metal). If no devices are found, the server will simply fallback to CPU usage. + +### Step B: Check if the client is able to connect to the server and see the available devices + +We're assuming the server is running on your local machine, and listening on the default port `50052`. If it's running on a different machine, you can replace `localhost` with the IP address of the server. + +**On the Client:** +```bash +./sd-cli --rpc localhost:50052 --list-devices +``` +If the server is running and the client is able to connect, you should see `RPC0 localhost:50052` in the list of devices. + +Example output: +(Client built without GPU acceleration, two GPUs available on the server) +``` +List of available GGML devices: +Name Description +------------------- +CPU AMD Ryzen 9 5900X 12-Core Processor +RPC0 localhost:50052 +RPC1 localhost:50052 +``` + +### Step C: Run with RPC device + +If everything is working correctly, you can now run the client while offloading some or all of the work to the RPC server. + +Example: Setting the main backend to the RPC0 device for doing all the work on the server. + +```bash +./sd-cli -m models/sd1.5.safetensors -p "A cat" --rpc localhost:50052 --main-backend-device RPC0 +``` + +--- + +## 5. Scaling: Multiple RPC Servers + +You can connect the client to multiple RPC servers simultaneously to scale out your hardware usage. + +Example: A main machine (192.168.1.10) with 3 GPUs, with one GPU running CUDA and the other two running Vulkan, and a second machine (192.168.1.11) only one GPU. + +**On the first machine (Running two server instances):** + +**Terminal 1 (CUDA):** +```bash +# Linux / macOS / WSL +export CUDA_VISIBLE_DEVICES=0 +./rpc-server-cuda --host 0.0.0.0 + +# Windows PowerShell +$env:CUDA_VISIBLE_DEVICES="0" +./rpc-server-cuda --host 0.0.0.0 +``` + +**Terminal 2 (Vulkan):** +```bash +./rpc-server-vulkan --host 0.0.0.0 --port 50053 -d Vulkan1,Vulkan2 +``` + +**On the second machine:** +```bash +./rpc-server --host 0.0.0.0 +``` + +**On the Client:** +Pass multiple server addresses separated by commas. + +```bash +./sd-cli --rpc 192.168.1.10:50052,192.168.1.10:50053,192.168.1.11:50052 --list-devices +``` + +The client will map these servers to sequential device IDs (e.g., RPC0 from the first server, RPC2, RPC3 from the second, and RPC4 from the third). With this setup, you could for example use RPC0 for the main backend, RPC1 and RPC2 for the text encoders, and RPC3 for the VAE. + +--- + +## 6. 
Performance Considerations + +RPC performance is heavily dependent on network bandwidth, as large weights and activations must be transferred back and forth over the network, especially for large models, or when using high resolutions. For best results, ensure your network connection is stable and has sufficient bandwidth (>1Gbps recommended). \ No newline at end of file From 72d7b6e18e1608035c410914da7d98542ef998cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Wed, 28 Jan 2026 20:44:13 +0100 Subject: [PATCH 17/23] update docs --- examples/cli/README.md | 16 +++++++++++++--- examples/server/README.md | 14 +++++++++++--- 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/examples/cli/README.md b/examples/cli/README.md index 904f3c441..38565cf9b 100644 --- a/examples/cli/README.md +++ b/examples/cli/README.md @@ -19,6 +19,8 @@ CLI Options: -M, --mode run mode, one of [img_gen, vid_gen, upscale, convert], default: img_gen --preview preview method. must be one of the following [none, proj, tae, vae] (default is none) -h, --help show this help message and exit + --rpc add a rpc device + --list-devices list available ggml compute devices Context Options: -m, --model path to full model @@ -41,6 +43,17 @@ Context Options: --tensor-type-rules weight type per tensor pattern (example: "^vae\.=f16,model\.=q8_0") --photo-maker path to PHOTOMAKER model --upscale-model path to esrgan model. + --main-backend-device default device to use for all backends (defaults to main gpu device if hardware acceleration is available, otherwise + cpu) + --diffusion-backend-device device to use for diffusion (defaults to main-backend-device) + --clip-backend-device device to use for clip (defaults to main-backend-device). Can be a comma-separated list of devices for models with + multiple encoders + --vae-backend-device device to use for vae (defaults to main-backend-device). Also applies to tae, unless tae-backend-device is specified + --tae-backend-device device to use for tae (defaults to vae-backend-device) + --control-net-backend-device device to use for control net (defaults to main-backend-device) + --upscaler-backend-device device to use for upscaling models (defaults to main-backend-device) + --photomaker-backend-device device to use for photomaker (defaults to main-backend-device) + --vision-backend-device device to use for clip-vision model (defaults to main-backend-device) -t, --threads number of threads to use during computation (default: -1). If threads <= 0, then threads will be set to the number of CPU physical cores --chroma-t5-mask-pad t5 mask pad size of chroma @@ -49,9 +62,6 @@ Context Options: --force-sdxl-vae-conv-scale force use of conv scale on sdxl vae --offload-to-cpu place the weights in RAM to save VRAM, and automatically load them into VRAM when needed --mmap whether to memory-map model - --control-net-cpu keep controlnet in cpu (for low vram) - --clip-on-cpu keep clip in cpu (for low vram) - --vae-on-cpu keep vae in cpu (for low vram) --fa use flash attention --diffusion-fa use flash attention in the diffusion model only --diffusion-conv-direct use ggml_conv2d_direct in the diffusion model diff --git a/examples/server/README.md b/examples/server/README.md index 8aa2158f5..5a44e6c4b 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -121,6 +121,17 @@ Context Options: --tensor-type-rules weight type per tensor pattern (example: "^vae\.=f16,model\.=q8_0") --photo-maker path to PHOTOMAKER model --upscale-model path to esrgan model. 
+ --main-backend-device default device to use for all backends (defaults to main gpu device if hardware acceleration is available, otherwise + cpu) + --diffusion-backend-device device to use for diffusion (defaults to main-backend-device) + --clip-backend-device device to use for clip (defaults to main-backend-device). Can be a comma-separated list of devices for models with + multiple encoders + --vae-backend-device device to use for vae (defaults to main-backend-device). Also applies to tae, unless tae-backend-device is specified + --tae-backend-device device to use for tae (defaults to vae-backend-device) + --control-net-backend-device device to use for control net (defaults to main-backend-device) + --upscaler-backend-device device to use for upscaling models (defaults to main-backend-device) + --photomaker-backend-device device to use for photomaker (defaults to main-backend-device) + --vision-backend-device device to use for clip-vision model (defaults to main-backend-device) -t, --threads number of threads to use during computation (default: -1). If threads <= 0, then threads will be set to the number of CPU physical cores --chroma-t5-mask-pad t5 mask pad size of chroma @@ -129,9 +140,6 @@ Context Options: --force-sdxl-vae-conv-scale force use of conv scale on sdxl vae --offload-to-cpu place the weights in RAM to save VRAM, and automatically load them into VRAM when needed --mmap whether to memory-map model - --control-net-cpu keep controlnet in cpu (for low vram) - --clip-on-cpu keep clip in cpu (for low vram) - --vae-on-cpu keep vae in cpu (for low vram) --fa use flash attention --diffusion-fa use flash attention in the diffusion model only --diffusion-conv-direct use ggml_conv2d_direct in the diffusion model From 5ed41329da33f09fa20e781bc73e347ac6a0f852 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Wed, 28 Jan 2026 21:06:02 +0100 Subject: [PATCH 18/23] update RPC docs --- docs/rpc.md | 42 ++++++++++++++++++++++++++++++------------ 1 file changed, 30 insertions(+), 12 deletions(-) diff --git a/docs/rpc.md b/docs/rpc.md index 0a9b7ccd2..44485478c 100644 --- a/docs/rpc.md +++ b/docs/rpc.md @@ -1,12 +1,13 @@ # Building and Using the RPC Server with `stable-diffusion.cpp` -This guide covers how to build a version of the RPC server from `llama.cpp` that is compatible with your version of `stable-diffusion.cpp` to manage multi-backends setups. RPC allows you to offload specific model components to a remote server. +This guide covers how to build a version of [the RPC server from `llama.cpp`](https://github.com/ggml-org/llama.cpp/blob/master/tools/rpc/README.md) that is compatible with your version of `stable-diffusion.cpp` to manage multi-backends setups. RPC allows you to offload specific model components to a remote server. > **Note on Model Location:** The model files (e.g., `.safetensors` or `.gguf`) remain on the **Client** machine. The client parses the file and transmits the necessary tensor data and computational graphs to the server. The server does not need to store the model files locally. ## 1. Building `stable-diffusion.cpp` with RPC client First, you should build the client application from source. It requires `GGML_RPC=ON` to include the RPC backend to your client. + ```bash mkdir build cd build @@ -16,7 +17,7 @@ cmake .. \ cmake --build . 
--config Release -j $(nproc) ``` -> **Note:** Ensure you add the other flags you would normally use (e.g., `-DSD_VULKAN=ON`, `-DSD_CUDA=ON`, `-DSD_HIPBLAS=ON`, or `-DGGML_METAL=ON`), for more information about building `stable-diffusion.cpp` from source, please refer to the `build.md` documentation. +> **Note:** Ensure you add the other flags you would normally use (e.g., `-DSD_VULKAN=ON`, `-DSD_CUDA=ON`, `-DSD_HIPBLAS=ON`, or `-DGGML_METAL=ON`), for more information about building `stable-diffusion.cpp` from source, please refer to the [build.md](build.md) documentation. ## 2. Ensure `llama.cpp` is at the correct commit @@ -25,6 +26,7 @@ cmake --build . --config Release -j $(nproc) > **Start from Root:** Perform these steps from the root of your `stable-diffusion.cpp` directory. 1. Read the target commit hash from the submodule tracker: + ```bash # Linux / WSL / MacOS HASH=$(cat ggml/scripts/sync-llama.last) @@ -39,8 +41,7 @@ cmake --build . --config Release -j $(nproc) cd llama.cpp git checkout $HASH ``` - -To save on download time and storage, you can use a shallow clone to download only the target commit: + To save on download time and storage, you can use a shallow clone to download only the target commit: ```bash mkdir -p llama.cpp cd llama.cpp @@ -54,15 +55,16 @@ To save on download time and storage, you can use a shallow clone to download on The RPC server acts as the worker. You must explicitly enable the **backend** (the hardware interface, such as CUDA for Nvidia, Metal for Apple Silicon, or Vulkan) when building, otherwise the server will default to using only the CPU. -To find the correct flags, refer to the official documentation for the `llama.cpp` repository. +To find the correct flags for your system, refer to the official documentation for the [`llama.cpp`](https://github.com/ggml-org/llama.cpp/blob/master/docs/build.md) repository. > **Crucial:** You must include the compiler flags required to satisfy the API compatibility with `stable-diffusion.cpp` (`-DGGML_MAX_NAME=128`). Without this flag, `GGML_MAX_NAME` will default to `64` for the server, and data transfers between the client and server will fail. Of course, `-DGGML_RPC` must also be enabled. > > I recommend disabling the `LLAMA_CURL` flag to avoid unnecessary dependencies, and disabling shared library builds to avoid potential conflicts. -> **Build Target:** We are specifically building the `rpc-server` target. This prevents the build system from compiling the entire `llama.cpp` suite (like `llama-cli`), making the build significantly faster. +> **Build Target:** We are specifically building the `rpc-server` target. This prevents the build system from compiling the entire `llama.cpp` suite (like `llama-server`), making the build significantly faster. ### Linux / WSL (Vulkan) + ```bash mkdir build cd build @@ -76,6 +78,7 @@ cmake --build . --config Release --target rpc-server -j $(nproc) ``` ### macOS (Metal) + ```bash mkdir build cd build @@ -89,6 +92,7 @@ cmake --build . --config Release --target rpc-server ``` ### Windows (Visual Studio 2022, Vulkan) + ```powershell mkdir build cd build @@ -112,10 +116,13 @@ Start the server. 
It listens for connections on the default address (usually `lo **On the Server :** If running on the same machine, you can use the default address: + ```bash ./rpc-server ``` + If you want to allow connections from other machines on the network: + ```bash ./rpc-server --host 0.0.0.0 ``` @@ -129,13 +136,16 @@ If you want to allow connections from other machines on the network: We're assuming the server is running on your local machine, and listening on the default port `50052`. If it's running on a different machine, you can replace `localhost` with the IP address of the server. **On the Client:** + ```bash ./sd-cli --rpc localhost:50052 --list-devices ``` + If the server is running and the client is able to connect, you should see `RPC0 localhost:50052` in the list of devices. -Example output: +Example output: (Client built without GPU acceleration, two GPUs available on the server) + ``` List of available GGML devices: Name Description @@ -166,23 +176,31 @@ Example: A main machine (192.168.1.10) with 3 GPUs, with one GPU running CUDA an **On the first machine (Running two server instances):** **Terminal 1 (CUDA):** + ```bash -# Linux / macOS / WSL +# Linux / WSL export CUDA_VISIBLE_DEVICES=0 -./rpc-server-cuda --host 0.0.0.0 +cd ./build_cuda/bin/Release +./rpc-server --host 0.0.0.0 # Windows PowerShell $env:CUDA_VISIBLE_DEVICES="0" -./rpc-server-cuda --host 0.0.0.0 +cd .\build_cuda\bin\Release +./rpc-server --host 0.0.0.0 ``` **Terminal 2 (Vulkan):** + ```bash -./rpc-server-vulkan --host 0.0.0.0 --port 50053 -d Vulkan1,Vulkan2 +cd ./build_vulkan/bin/Release +# ignore the first GPU (used by CUDA server) +./rpc-server --host 0.0.0.0 --port 50053 -d Vulkan1,Vulkan2 ``` **On the second machine:** + ```bash +cd ./build/bin/Release ./rpc-server --host 0.0.0.0 ``` @@ -199,4 +217,4 @@ The client will map these servers to sequential device IDs (e.g., RPC0 from the ## 6. Performance Considerations -RPC performance is heavily dependent on network bandwidth, as large weights and activations must be transferred back and forth over the network, especially for large models, or when using high resolutions. For best results, ensure your network connection is stable and has sufficient bandwidth (>1Gbps recommended). \ No newline at end of file +RPC performance is heavily dependent on network bandwidth, as large weights and activations must be transferred back and forth over the network, especially for large models, or when using high resolutions. For best results, ensure your network connection is stable and has sufficient bandwidth (>1Gbps recommended). 
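The device-selection flags introduced earlier in this series compose directly with the RPC devices described in the documentation above. As a hedged illustration (the model path and the `RPC0` / `CPU` names are placeholders; use whatever `--list-devices` reports on your own setup), one possible split keeps the diffusion model on the remote server while the text encoders and the VAE stay local:

```bash
# Illustrative sketch only: device names depend on the output of --list-devices.
./sd-cli -m models/sd1.5.safetensors -p "A cat" \
  --rpc localhost:50052 \
  --diffusion-backend-device RPC0 \
  --clip-backend-device CPU \
  --vae-backend-device CPU
```

This is only one possible layout; as the performance note above says, the best split depends on your network bandwidth and on which components dominate VRAM usage.
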
From 75f223b5ef9a92dc3048163252faa6db763b6465 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Thu, 29 Jan 2026 12:34:32 +0100 Subject: [PATCH 19/23] fix apply_loras_immediately when using different non-CPU backends --- src/stable-diffusion.cpp | 56 +++++++++++++++++++++++++++++++++------- 1 file changed, 47 insertions(+), 9 deletions(-) diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 5a8226e9e..a58089b00 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -1210,22 +1210,60 @@ class StableDiffusionGGML { } for (auto& kv : lora_state_diff) { + bool applied = false; int64_t t0 = ggml_time_ms(); // TODO: Fix that - bool are_clip_backends_compatible = true; + bool are_clip_backends_similar = true; for (auto backend: clip_backends){ - are_clip_backends_compatible = are_clip_backends_compatible && (diffusion_backend==backend || ggml_backend_is_cpu(backend)); + are_clip_backends_similar = are_clip_backends_similar && (clip_backends[0]==backend || ggml_backend_is_cpu(backend)); } - if(!are_clip_backends_compatible){ - LOG_WARN("Diffusion models and text encoders are running on different backends. This may cause issues when immediately applying LoRAs."); + if(!are_clip_backends_similar){ + LOG_WARN("Text encoders are running on different backends. This may cause issues when immediately applying LoRAs."); } - auto lora = load_lora_model_from_file(kv.first, kv.second, diffusion_backend); - if (!lora || lora->lora_tensors.empty()) { - continue; + auto lora_tensor_filter_diff = [&](const std::string& tensor_name) { + if (is_diffusion_model_name(tensor_name)) { + return true; + } + return false; + }; + auto lora = load_lora_model_from_file(kv.first, kv.second, diffusion_backend, lora_tensor_filter_diff); + if (lora && !lora->lora_tensors.empty()) { + lora->apply(tensors, version, n_threads); + lora->free_params_buffer(); + applied = true; + } + + auto lora_tensor_filter_cond = [&](const std::string& tensor_name) { + if (is_cond_stage_model_name(tensor_name)) { + return true; + } + return false; + }; + // TODO: split by model + lora = load_lora_model_from_file(kv.first, kv.second, clip_backends[0], lora_tensor_filter_cond); + if (lora && !lora->lora_tensors.empty()) { + lora->apply(tensors, version, n_threads); + lora->free_params_buffer(); + applied = true; } - lora->apply(tensors, version, n_threads); - lora->free_params_buffer(); + auto lora_tensor_filter_first = [&](const std::string& tensor_name) { + if (is_first_stage_model_name(tensor_name)) { + return true; + } + return false; + }; + auto first_stage_backend = use_tiny_autoencoder ? 
tae_backend : vae_backend; + lora = load_lora_model_from_file(kv.first, kv.second, first_stage_backend, lora_tensor_filter_first); + if (lora && !lora->lora_tensors.empty()) { + lora->apply(tensors, version, n_threads); + lora->free_params_buffer(); + applied = true; + } + + if (!applied) { + continue; + } int64_t t1 = ggml_time_ms(); LOG_INFO("lora '%s' applied, taking %.2fs", kv.first.c_str(), (t1 - t0) * 1.0f / 1000); From ab598fcf2af47f0653c2a1b1f86890c3894c1362 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Mon, 16 Feb 2026 15:19:51 +0100 Subject: [PATCH 20/23] Force sequencial tensor loading when using RPC --- CMakeLists.txt | 6 ++++++ docs/rpc.md | 4 ++-- src/stable-diffusion.cpp | 45 ++++++++++++++++++++++++++++++++++++---- 3 files changed, 49 insertions(+), 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index bad1ba4c2..afc756ad7 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -77,6 +77,12 @@ if(SD_MUSA) add_definitions(-DSD_USE_CUDA) endif() +if (SD_RPC) + message("-- Use RPC as backend stable-diffusion") + set(GGML_RPC ON) + add_definitions(-DSD_USE_RPC) +endif () + set(SD_LIB stable-diffusion) file(GLOB SD_LIB_SOURCES diff --git a/docs/rpc.md b/docs/rpc.md index 44485478c..2f4e92282 100644 --- a/docs/rpc.md +++ b/docs/rpc.md @@ -6,13 +6,13 @@ This guide covers how to build a version of [the RPC server from `llama.cpp`](ht ## 1. Building `stable-diffusion.cpp` with RPC client -First, you should build the client application from source. It requires `GGML_RPC=ON` to include the RPC backend to your client. +First, you should build the client application from source. It requires `SD_RPC=ON` to include the RPC backend to your client. ```bash mkdir build cd build cmake .. \ - -DGGML_RPC=ON \ + -DSD_RPC=ON \ # Add other build flags here (e.g., -DSD_VULKAN=ON) cmake --build . --config Release -j $(nproc) ``` diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index a58089b00..6cdfd1517 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -24,6 +24,10 @@ #include "latent-preview.h" #include "name_conversion.h" +#if SD_USE_RPC +#include "ggml-rpc.h" +#endif + const char* model_version_to_str[] = { "SD 1.x", "SD 1.x Inpaint", @@ -863,7 +867,13 @@ class StableDiffusionGGML { } return false; }; - if (!pmid_lora->load_from_file(n_threads, lora_tensor_filter)) { + int n_th = n_threads; +#ifdef SD_USE_RPC + if (ggml_backend_is_rpc(diffusion_backend)) { + n_th = 1; // avoid multi-thread for loading to remote + } +#endif + if (!pmid_lora->load_from_file(n_th, lora_tensor_filter)) { LOG_WARN("load photomaker lora tensors from %s failed", sd_ctx_params->photo_maker_path); return false; } @@ -955,7 +965,22 @@ class StableDiffusionGGML { if (version == VERSION_SVD) { ignore_tensors.insert("conditioner.embedders.3"); } - bool success = model_loader.load_tensors(tensors, ignore_tensors, n_threads, sd_ctx_params->enable_mmap); + int n_th = n_threads; +#ifdef SD_USE_RPC + // TODO: maybe set it to 1 threads only for model parts that are on remote? 
+ bool is_any_clip_rpc = false; + for (auto& backend : clip_backends) { + if (ggml_backend_is_rpc(backend)) { + is_any_clip_rpc = true; + } + } + // I think those are all the backends that should get sent data to when calling model_loader.load_tensors() + if (is_any_clip_rpc || ggml_backend_is_rpc(diffusion_backend) || ggml_backend_is_rpc(vae_backend) || ggml_backend_is_rpc(vision_backend) || ggml_backend_is_rpc(pmid_backend)) { + LOG_DEBUG("Using single-thread for tensor loading because RPC backend is used"); + n_th = 1; // avoid multi-thread for loading to remote + } +#endif + bool success = model_loader.load_tensors(tensors, ignore_tensors, n_th, sd_ctx_params->enable_mmap); if (!success) { LOG_ERROR("load tensors from model loader failed"); ggml_free(ctx); @@ -977,7 +1002,13 @@ class StableDiffusionGGML { } size_t control_net_params_mem_size = 0; if (control_net) { - if (!control_net->load_from_file(SAFE_STR(sd_ctx_params->control_net_path), n_threads)) { + int n_th = n_threads; +#ifdef SD_USE_RPC + if (ggml_backend_is_rpc(control_net_backend)) { + n_th = 1; // avoid multi-thread for loading to remote + } +#endif + if (!control_net->load_from_file(SAFE_STR(sd_ctx_params->control_net_path), n_th)) { return false; } control_net_params_mem_size = control_net->get_params_buffer_size(); @@ -1174,7 +1205,13 @@ class StableDiffusionGGML { LOG_DEBUG("high noise lora: %s", lora_path.c_str()); } auto lora = std::make_shared(lora_id, backend, lora_path, is_high_noise ? "model.high_noise_" : "", version); - if (!lora->load_from_file(n_threads, lora_tensor_filter)) { + int n_th = n_threads; +#ifdef SD_USE_RPC + if (ggml_backend_is_rpc(backend)) { + n_th = 1; // avoid multi-thread for loading to remote + } +#endif + if (!lora->load_from_file(n_th, lora_tensor_filter)) { LOG_WARN("load lora tensors from %s failed", lora_path.c_str()); return nullptr; } From 4773e1df0ded2252b42715828bcdfd65fd8db7f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Sun, 1 Mar 2026 20:49:07 +0100 Subject: [PATCH 21/23] fix build --- src/stable-diffusion.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 6cdfd1517..d48146526 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -665,7 +665,7 @@ class StableDiffusionGGML { version, sd_ctx_params->qwen_image_zero_cond_t); } else if (sd_version_is_anima(version)) { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, tensor_storage_map); diffusion_model = std::make_shared(backend, From 046ffc36f6acdecebd75667d8835b61d77eda164 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Thu, 19 Mar 2026 14:30:39 +0100 Subject: [PATCH 22/23] Get first stage backend for loading loras --- src/ggml_extend.hpp | 8 ++++++++ src/stable-diffusion.cpp | 6 +++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/src/ggml_extend.hpp b/src/ggml_extend.hpp index 9c1a27f68..78c572ac0 100644 --- a/src/ggml_extend.hpp +++ b/src/ggml_extend.hpp @@ -2208,6 +2208,14 @@ struct GGMLRunner { void set_weight_adapter(const std::shared_ptr& adapter) { weight_adapter = adapter; } + + ggml_backend_t get_runtime_backend() { + return runtime_backend; + } + + ggml_backend_t get_params_backend() { + return params_backend; + } }; class GGMLBlock { diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index d48146526..86d9b124e 100644 --- a/src/stable-diffusion.cpp +++ 
b/src/stable-diffusion.cpp @@ -1263,6 +1263,8 @@ class StableDiffusionGGML { } return false; }; + + LOG_INFO("applying lora to diffusion model"); auto lora = load_lora_model_from_file(kv.first, kv.second, diffusion_backend, lora_tensor_filter_diff); if (lora && !lora->lora_tensors.empty()) { lora->apply(tensors, version, n_threads); @@ -1277,6 +1279,7 @@ class StableDiffusionGGML { return false; }; // TODO: split by model + LOG_INFO("applying lora to text encoders"); lora = load_lora_model_from_file(kv.first, kv.second, clip_backends[0], lora_tensor_filter_cond); if (lora && !lora->lora_tensors.empty()) { lora->apply(tensors, version, n_threads); @@ -1290,7 +1293,8 @@ class StableDiffusionGGML { } return false; }; - auto first_stage_backend = use_tiny_autoencoder ? tae_backend : vae_backend; + LOG_INFO("applying lora to first stage model"); + auto first_stage_backend = first_stage_model->get_params_backend(); lora = load_lora_model_from_file(kv.first, kv.second, first_stage_backend, lora_tensor_filter_first); if (lora && !lora->lora_tensors.empty()) { lora->apply(tensors, version, n_threads); From 9d47c33caec799d0a3e1ec1f7794ac37b76e3941 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20du=20Hamel?= Date: Thu, 19 Mar 2026 15:27:34 +0100 Subject: [PATCH 23/23] Fix lora loading when using multiple clip backends --- src/conditioner.hpp | 171 ++++++++++++++++++++++++++++++++++++--- src/stable-diffusion.cpp | 68 ++++++++-------- 2 files changed, 196 insertions(+), 43 deletions(-) diff --git a/src/conditioner.hpp b/src/conditioner.hpp index 610039927..35442e1c2 100644 --- a/src/conditioner.hpp +++ b/src/conditioner.hpp @@ -82,6 +82,7 @@ struct Conditioner { virtual ~Conditioner() = default; public: + int model_count = 1; virtual SDCondition get_learned_condition(int n_threads, const ConditionerParams& conditioner_params) = 0; virtual void alloc_params_buffer() = 0; @@ -97,6 +98,11 @@ struct Conditioner { virtual std::string remove_trigger_from_prompt(const std::string& prompt) { GGML_ABORT("Not implemented yet!"); } + virtual bool is_cond_stage_model_name_at_index(const std::string& name, int index) { + return true; + } + virtual ggml_backend_t get_params_backend_at_index(int index) = 0; + virtual ggml_backend_t get_runtime_backend_at_index(int index) = 0; }; // ldm.modules.encoders.modules.FrozenCLIPEmbedder @@ -139,8 +145,9 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { LOG_INFO("CLIP-H: using %s backend", ggml_backend_name(clip_backend)); text_model = std::make_shared(clip_backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.transformer.text_model", OPEN_CLIP_VIT_H_14, true, force_clip_f32); } else if (sd_version_is_sdxl(version)) { + model_count = 2; ggml_backend_t clip_g_backend = clip_backend; - if (backends.size() >= 2){ + if (backends.size() >= 2) { clip_g_backend = backends[1]; if (backends.size() > 2) { LOG_WARN("More than 2 clip backends provided, but the model only supports 2 text encoders. 
Ignoring the rest."); @@ -669,6 +676,42 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { conditioner_params.adm_in_channels, conditioner_params.zero_out_masked); } + + bool is_cond_stage_model_name_at_index(const std::string& name, int index) override { + if (sd_version_is_sdxl(version)) { + if (index == 0) { + return contains(name, "cond_stage_model.model.transformer"); + } else if (index == 1) { + return contains(name, "cond_stage_model.model.1"); + } else { + return false; + } + } + return true; + } + + ggml_backend_t get_params_backend_at_index(int index){ + if (sd_version_is_sdxl(version) && index == 1){ + if(text_model2) { + return text_model2->get_params_backend(); + } + } else if (text_model) { + return text_model->get_params_backend(); + } + return nullptr; + } + + ggml_backend_t get_runtime_backend_at_index(int index){ + if (sd_version_is_sdxl(version) && index == 1){ + if(text_model2) { + return text_model2->get_runtime_backend(); + } + } else if (text_model) { + return text_model->get_runtime_backend(); + } + return nullptr; + } + }; struct FrozenCLIPVisionEmbedder : public GGMLRunner { @@ -741,12 +784,14 @@ struct SD3CLIPEmbedder : public Conditioner { bool use_clip_g = false; bool use_t5 = false; + model_count = 3; + ggml_backend_t clip_l_backend, clip_g_backend, t5_backend; if (backends.size() == 1) { clip_l_backend = clip_g_backend = t5_backend = backends[0]; } else if (backends.size() == 2) { clip_l_backend = clip_g_backend = backends[0]; - t5_backend = backends[1]; + t5_backend = backends[1]; } else if (backends.size() >= 3) { clip_l_backend = backends[0]; clip_g_backend = backends[1]; @@ -1098,6 +1143,42 @@ struct SD3CLIPEmbedder : public Conditioner { conditioner_params.clip_skip, conditioner_params.zero_out_masked); } + + bool is_cond_stage_model_name_at_index(const std::string& name, int index) override { + if (index == 0) { + return contains(name, "text_encoders.clip_l"); + } else if (index == 1) { + return contains(name, "text_encoders.clip_g"); + } else if (index == 2) { + return contains(name, "text_encoders.t5xxl"); + } else { + return false; + } + } + + ggml_backend_t get_params_backend_at_index(int index){ + if (index == 0 && clip_l) { + return clip_l->get_params_backend(); + } else if (index == 1 && clip_g) { + return clip_g->get_params_backend(); + } else if (index == 2 && t5) { + return t5->get_params_backend(); + } else { + return nullptr; + } + } + + ggml_backend_t get_runtime_backend_at_index(int index){ + if (index == 0 && clip_l) { + return clip_l->get_runtime_backend(); + } else if (index == 1 && clip_g) { + return clip_g->get_runtime_backend(); + } else if (index == 2 && t5) { + return t5->get_runtime_backend(); + } else { + return nullptr; + } + } }; struct FluxCLIPEmbedder : public Conditioner { @@ -1113,19 +1194,19 @@ struct FluxCLIPEmbedder : public Conditioner { bool use_clip_l = false; bool use_t5 = false; + model_count = 2; ggml_backend_t clip_l_backend, t5_backend; if (backends.size() == 1) { clip_l_backend = t5_backend = backends[0]; } else if (backends.size() >= 2) { clip_l_backend = backends[0]; - t5_backend = backends[1]; + t5_backend = backends[1]; if (backends.size() > 2) { LOG_WARN("More than 2 clip backends provided, but the model only supports 2 text encoders. 
Ignoring the rest."); } } - for (auto pair : tensor_storage_map) { if (pair.first.find("text_encoders.clip_l") != std::string::npos) { use_clip_l = true; @@ -1358,6 +1439,36 @@ struct FluxCLIPEmbedder : public Conditioner { conditioner_params.clip_skip, conditioner_params.zero_out_masked); } + + bool is_cond_stage_model_name_at_index(const std::string& name, int index) override { + if (index == 0) { + return contains(name, "text_encoders.clip_l"); + } else if (index == 1) { + return contains(name, "text_encoders.t5xxl"); + } else { + return false; + } + } + + ggml_backend_t get_params_backend_at_index(int index){ + if (index == 0 && clip_l) { + return clip_l->get_params_backend(); + } else if (index == 1 && t5) { + return t5->get_params_backend(); + } else { + return nullptr; + } + } + + ggml_backend_t get_runtime_backend_at_index(int index){ + if (index == 0 && clip_l) { + return clip_l->get_runtime_backend(); + } else if (index == 1 && t5) { + return t5->get_runtime_backend(); + } else { + return nullptr; + } + } }; struct T5CLIPEmbedder : public Conditioner { @@ -1554,6 +1665,20 @@ struct T5CLIPEmbedder : public Conditioner { conditioner_params.clip_skip, conditioner_params.zero_out_masked); } + + ggml_backend_t get_params_backend_at_index(int index){ + if (t5){ + return t5->get_params_backend(); + } + return nullptr; + } + + ggml_backend_t get_runtime_backend_at_index(int index){ + if (t5){ + return t5->get_runtime_backend(); + } + return nullptr; + } }; struct AnimaConditioner : public Conditioner { @@ -1566,11 +1691,11 @@ struct AnimaConditioner : public Conditioner { const String2TensorStorage& tensor_storage_map = {}) { qwen_tokenizer = std::make_shared(); llm = std::make_shared(LLM::LLMArch::QWEN3, - backend, - offload_params_to_cpu, - tensor_storage_map, - "text_encoders.llm", - false); + backend, + offload_params_to_cpu, + tensor_storage_map, + "text_encoders.llm", + false); } void get_param_tensors(std::map& tensors) override { @@ -1668,6 +1793,20 @@ struct AnimaConditioner : public Conditioner { result.c_t5_weights = std::move(t5_weight_tensor); return result; } + + ggml_backend_t get_params_backend_at_index(int index){ + if (llm){ + return llm->get_params_backend(); + } + return nullptr; + } + + ggml_backend_t get_runtime_backend_at_index(int index){ + if (llm){ + return llm->get_runtime_backend(); + } + return nullptr; + } }; struct LLMEmbedder : public Conditioner { @@ -2012,6 +2151,20 @@ struct LLMEmbedder : public Conditioner { result.extra_c_crossattns = std::move(extra_hidden_states_vec); return result; } + + ggml_backend_t get_params_backend_at_index(int index){ + if (llm){ + return llm->get_params_backend(); + } + return nullptr; + } + + ggml_backend_t get_runtime_backend_at_index(int index){ + if (llm){ + return llm->get_runtime_backend(); + } + return nullptr; + } }; #endif diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index 86d9b124e..1be1dddfb 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -1249,14 +1249,6 @@ class StableDiffusionGGML { for (auto& kv : lora_state_diff) { bool applied = false; int64_t t0 = ggml_time_ms(); - // TODO: Fix that - bool are_clip_backends_similar = true; - for (auto backend: clip_backends){ - are_clip_backends_similar = are_clip_backends_similar && (clip_backends[0]==backend || ggml_backend_is_cpu(backend)); - } - if(!are_clip_backends_similar){ - LOG_WARN("Text encoders are running on different backends. 
This may cause issues when immediately applying LoRAs."); - } auto lora_tensor_filter_diff = [&](const std::string& tensor_name) { if (is_diffusion_model_name(tensor_name)) { return true; @@ -1272,19 +1264,22 @@ class StableDiffusionGGML { applied = true; } - auto lora_tensor_filter_cond = [&](const std::string& tensor_name) { - if (is_cond_stage_model_name(tensor_name)) { - return true; + for (int i = 0; i < cond_stage_model->model_count; i++) { + auto lora_tensor_filter_cond = [&](const std::string& tensor_name) { + if (is_cond_stage_model_name(tensor_name)) { + return cond_stage_model->is_cond_stage_model_name_at_index(tensor_name, i); + } + return false; + }; + // TODO: split by model + LOG_INFO("applying lora to text encoder (%d)", i); + auto backend = cond_stage_model->get_params_backend_at_index(i); + lora = load_lora_model_from_file(kv.first, kv.second, backend, lora_tensor_filter_cond); + if (lora && !lora->lora_tensors.empty()) { + lora->apply(tensors, version, n_threads); + lora->free_params_buffer(); + applied = true; } - return false; - }; - // TODO: split by model - LOG_INFO("applying lora to text encoders"); - lora = load_lora_model_from_file(kv.first, kv.second, clip_backends[0], lora_tensor_filter_cond); - if (lora && !lora->lora_tensors.empty()) { - lora->apply(tensors, version, n_threads); - lora->free_params_buffer(); - applied = true; } auto lora_tensor_filter_first = [&](const std::string& tensor_name) { @@ -1346,22 +1341,27 @@ class StableDiffusionGGML { } } cond_stage_lora_models = lora_models; - auto lora_tensor_filter = [&](const std::string& tensor_name) { - if (is_cond_stage_model_name(tensor_name)) { - return true; - } - return false; - }; - for (auto& kv : lora_state_diff) { - const std::string& lora_id = kv.first; - float multiplier = kv.second; - //TODO: split by model - auto lora = load_lora_model_from_file(lora_id, multiplier, clip_backends[0], lora_tensor_filter); - if (lora && !lora->lora_tensors.empty()) { - lora->preprocess_lora_tensors(tensors); - cond_stage_lora_models.push_back(lora); + + + for(int i=0;imodel_count;i++){ + auto lora_tensor_filter_cond = [&](const std::string& tensor_name) { + if (is_cond_stage_model_name(tensor_name)) { + return cond_stage_model->is_cond_stage_model_name_at_index(tensor_name, i); + } + return false; + }; + for (auto& kv : lora_state_diff) { + const std::string& lora_id = kv.first; + float multiplier = kv.second; + auto backend = cond_stage_model->get_runtime_backend_at_index(i); + auto lora = load_lora_model_from_file(kv.first, kv.second, backend, lora_tensor_filter_cond); + if (lora && !lora->lora_tensors.empty()) { + lora->preprocess_lora_tensors(tensors); + cond_stage_lora_models.push_back(lora); + } } } + auto multi_lora_adapter = std::make_shared(cond_stage_lora_models); cond_stage_model->set_weight_adapter(multi_lora_adapter); }
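
As a closing usage sketch for the multi-encoder changes above (the model path and device names are placeholders; real names come from `--list-devices`), the comma-separated form of `--clip-backend-device` assigns each text encoder its own backend, and with this last patch the LoRA weights for a given encoder are loaded onto that encoder's backend as well:

```bash
# Illustrative sketch only: a model with two text encoders,
# first encoder on the remote RPC device, second on the local CPU.
./sd-cli -m models/sdxl.safetensors -p "A cat" \
  --rpc localhost:50052 \
  --clip-backend-device RPC0,CPU \
  --diffusion-backend-device RPC0
```

If fewer devices are listed than the model has text encoders, the remaining encoders share the earlier entries, as handled per model in the conditioner changes above.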