diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1fbcbf94d..4e0b9ae98 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -75,10 +75,14 @@ jobs: - name: Build id: cmake_build run: | - mkdir build - cd build - cmake .. -DGGML_AVX2=ON -DSD_BUILD_SHARED_LIBS=ON - cmake --build . --config Release + cmake -B build \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_RPATH='$ORIGIN' \ + -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \ + -DGGML_BACKEND_DL=ON \ + -DGGML_NATIVE=OFF \ + -DGGML_CPU_ALL_VARIANTS=ON + cmake --build build -j $(nproc) - name: Get commit hash id: commit @@ -138,10 +142,15 @@ jobs: - name: Build id: cmake_build run: | - mkdir build - cd build - cmake .. -DSD_BUILD_SHARED_LIBS=ON -DSD_VULKAN=ON - cmake --build . --config Release + cmake -B build \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_RPATH='$ORIGIN' \ + -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \ + -DGGML_BACKEND_DL=ON \ + -DGGML_NATIVE=OFF \ + -DGGML_CPU_ALL_VARIANTS=ON \ + -DGGML_VULKAN=ON + cmake --build build -j $(nproc) - name: Get commit hash id: commit @@ -275,10 +284,13 @@ jobs: id: cmake_build run: | sysctl -a - mkdir build - cd build - cmake .. -DGGML_AVX2=ON -DCMAKE_OSX_ARCHITECTURES="arm64;x86_64" -DSD_BUILD_SHARED_LIBS=ON - cmake --build . --config Release + cmake -B build \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_RPATH='@loader_path' \ + -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \ + -DGGML_METAL=ON \ + -DGGML_METAL_EMBED_LIBRARY=ON + cmake --build build -j $(sysctl -n hw.logicalcpu) - name: Get commit hash id: commit @@ -318,18 +330,12 @@ jobs: strategy: matrix: include: - - build: "noavx" - defines: "-DGGML_NATIVE=OFF -DGGML_AVX=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF -DSD_BUILD_SHARED_LIBS=ON" - - build: "avx2" - defines: "-DGGML_NATIVE=OFF -DGGML_AVX2=ON -DSD_BUILD_SHARED_LIBS=ON" - - build: "avx" - defines: "-DGGML_NATIVE=OFF -DGGML_AVX=ON -DGGML_AVX2=OFF -DSD_BUILD_SHARED_LIBS=ON" - - build: "avx512" - defines: "-DGGML_NATIVE=OFF -DGGML_AVX512=ON -DGGML_AVX=ON -DGGML_AVX2=ON -DSD_BUILD_SHARED_LIBS=ON" + - build: "cpu" + defines: "-DGGML_NATIVE=OFF -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON" - build: "cuda12" - defines: "-DSD_CUDA=ON -DSD_BUILD_SHARED_LIBS=ON -DCMAKE_CUDA_ARCHITECTURES='61;70;75;80;86;89;90;100;120' -DCMAKE_CUDA_FLAGS='-Xcudafe \"--diag_suppress=177\" -Xcudafe \"--diag_suppress=550\"'" + defines: "-DGGML_NATIVE=OFF -DGGML_CUDA=ON -DGGML_BACKEND_DL=ON -DCMAKE_CUDA_ARCHITECTURES='61;70;75;80;86;89;90;100;120' -DCMAKE_CUDA_FLAGS='-Xcudafe \"--diag_suppress=177\" -Xcudafe \"--diag_suppress=550\"'" - build: "vulkan" - defines: "-DSD_VULKAN=ON -DSD_BUILD_SHARED_LIBS=ON" + defines: "-DGGML_NATIVE=OFF -DGGML_CPU=ON -DGGML_VULKAN=ON -DGGML_BACKEND_DL=ON" steps: - name: Clone id: checkout @@ -377,19 +383,6 @@ jobs: cmake .. -DCMAKE_CXX_FLAGS='/bigobj' -G Ninja -DCMAKE_C_COMPILER=cl.exe -DCMAKE_CXX_COMPILER=cl.exe -DCMAKE_BUILD_TYPE=Release ${{ matrix.defines }} cmake --build . 
- - name: Check AVX512F support - id: check_avx512f - if: ${{ matrix.build == 'avx512' }} - continue-on-error: true - run: | - cd build - $vcdir = $(vswhere -latest -products * -requires Microsoft.VisualStudio.Component.VC.Tools.x86.x64 -property installationPath) - $msvc = $(join-path $vcdir $('VC\Tools\MSVC\'+$(gc -raw $(join-path $vcdir 'VC\Auxiliary\Build\Microsoft.VCToolsVersion.default.txt')).Trim())) - $cl = $(join-path $msvc 'bin\Hostx64\x64\cl.exe') - echo 'int main(void){unsigned int a[4];__cpuid(a,7);return !(a[1]&65536);}' >> avx512f.c - & $cl /O2 /GS- /kernel avx512f.c /link /nodefaultlib /entry:main - .\avx512f.exe && echo "AVX512F: YES" && ( echo HAS_AVX512F=1 >> $env:GITHUB_ENV ) || echo "AVX512F: NO" - - name: Get commit hash id: commit if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} @@ -514,9 +507,10 @@ jobs: $env:CMAKE_PREFIX_PATH="${env:HIP_PATH}" cmake .. ` -G "Unix Makefiles" ` - -DSD_HIPBLAS=ON ` - -DSD_BUILD_SHARED_LIBS=ON ` + -DGGML_HIP=ON ` -DGGML_NATIVE=OFF ` + -DGGML_CPU=ON ` + -DGGML_BACKEND_DL=ON ` -DCMAKE_C_COMPILER=clang ` -DCMAKE_CXX_COMPILER=clang++ ` -DCMAKE_BUILD_TYPE=Release ` @@ -643,12 +637,15 @@ jobs: -DCMAKE_CXX_COMPILER=amdclang++ \ -DCMAKE_C_COMPILER=amdclang \ -DCMAKE_BUILD_TYPE=Release \ - -DSD_HIPBLAS=ON \ + -DGGML_BACKEND_DL=ON \ + -DGGML_NATIVE=OFF \ + -DCMAKE_INSTALL_RPATH='$ORIGIN' \ + -DGGML_CPU_ALL_VARIANTS=ON \ + -DGGML_HIP=ON \ -DGPU_TARGETS="${{ env.GPU_TARGETS }}" \ -DAMDGPU_TARGETS="${{ env.GPU_TARGETS }}" \ -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON \ - -DCMAKE_POSITION_INDEPENDENT_CODE=ON \ - -DSD_BUILD_SHARED_LIBS=ON + -DCMAKE_POSITION_INDEPENDENT_CODE=ON cmake --build . --config Release - name: Get commit hash diff --git a/CMakeLists.txt b/CMakeLists.txt index bad1ba4c2..f18c81141 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -8,6 +8,8 @@ if (NOT XCODE AND NOT MSVC AND NOT CMAKE_BUILD_TYPE) set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo") endif() +list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/") + if (MSVC) add_compile_definitions(_CRT_SECURE_NO_WARNINGS) add_compile_definitions(_SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING) @@ -22,61 +24,85 @@ else() set(SD_STANDALONE OFF) endif() +if (MINGW) + set(BUILD_SHARED_LIBS_DEFAULT OFF) + else() + set(BUILD_SHARED_LIBS_DEFAULT ON) +endif() + +option(BUILD_SHARED_LIBS "build shared libraries" ${BUILD_SHARED_LIBS_DEFAULT}) + +if (WIN32) + add_compile_definitions(_CRT_SECURE_NO_WARNINGS) +endif() + +if (MSVC) + add_compile_options("$<$:/utf-8>") + add_compile_options("$<$:/utf-8>") + add_compile_options("$<$:/bigobj>") + add_compile_options("$<$:/bigobj>") +endif() + +if (SD_STANDALONE) + # enable parallel builds for msbuild + list(APPEND CMAKE_VS_GLOBALS UseMultiToolTask=true) + list(APPEND CMAKE_VS_GLOBALS EnforceProcessCountAcrossBuilds=true) +endif() + + # # Option list # - # general #option(SD_BUILD_TESTS "sd: build tests" ${SD_STANDALONE}) option(SD_BUILD_EXAMPLES "sd: build examples" ${SD_STANDALONE}) -option(SD_CUDA "sd: cuda backend" OFF) -option(SD_HIPBLAS "sd: rocm backend" OFF) -option(SD_METAL "sd: metal backend" OFF) -option(SD_VULKAN "sd: vulkan backend" OFF) -option(SD_OPENCL "sd: opencl backend" OFF) -option(SD_SYCL "sd: sycl backend" OFF) -option(SD_MUSA "sd: musa backend" OFF) -option(SD_BUILD_SHARED_LIBS "sd: build shared libs" OFF) -option(SD_BUILD_SHARED_GGML_LIB "sd: build ggml as a separate shared lib" OFF) 
option(SD_USE_SYSTEM_GGML "sd: use system-installed GGML library" OFF) #option(SD_BUILD_SERVER "sd: build server example" ON) -if(SD_CUDA) - message("-- Use CUDA as backend stable-diffusion") - set(GGML_CUDA ON) - add_definitions(-DSD_USE_CUDA) -endif() +# Required for relocatable CMake package +include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/build-info.cmake) +include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/common.cmake) -if(SD_METAL) - message("-- Use Metal as backend stable-diffusion") - set(GGML_METAL ON) - add_definitions(-DSD_USE_METAL) +if (NOT DEFINED SD_BUILD_NUMBER) + set(SD_BUILD_NUMBER ${BUILD_NUMBER}) endif() +if (NOT DEFINED SD_BUILD_COMMIT) + set(SD_BUILD_COMMIT ${BUILD_COMMIT}) +endif() +set(SD_INSTALL_VERSION 0.0.${SD_BUILD_NUMBER}) -if (SD_VULKAN) - message("-- Use Vulkan as backend stable-diffusion") - set(GGML_VULKAN ON) - add_definitions(-DSD_USE_VULKAN) -endif () - -if (SD_OPENCL) - message("-- Use OpenCL as backend stable-diffusion") - set(GGML_OPENCL ON) - add_definitions(-DSD_USE_OPENCL) -endif () - -if (SD_HIPBLAS) - message("-- Use HIPBLAS as backend stable-diffusion") - set(GGML_HIP ON) - add_definitions(-DSD_USE_CUDA) -endif () +# override ggml options +set(GGML_ALL_WARNINGS ${SD_ALL_WARNINGS}) +set(GGML_FATAL_WARNINGS ${SD_FATAL_WARNINGS}) -if(SD_MUSA) - message("-- Use MUSA as backend stable-diffusion") - set(GGML_MUSA ON) - add_definitions(-DSD_USE_CUDA) +if (NOT DEFINED GGML_CUDA_GRAPHS) + set(GGML_CUDA_GRAPHS_DEFAULT ON) endif() +# Ref: https://github.com/ggml-org/llama.cpp/blob/master/CMakeLists.txt#L145 +# transition helpers +function (sd_option_depr TYPE OLD) + if (${OLD}) + set(NEW "${ARGV2}") + if(NEW) + message(${TYPE} "${OLD} is deprecated, use ${NEW} instead") + set(${NEW} ON PARENT_SCOPE) + else() + message(${TYPE} "${OLD} is deprecated and will be ignored") + endif() + endif() +endfunction() + +sd_option_depr(FATAL_ERROR SD_HIPBLAS GGML_CUDA) +sd_option_depr(FATAL_ERROR SD_BUILD_SHARED_LIBS BUILD_SHARED_LIBS) +sd_option_depr(FATAL_ERROR SD_BUILD_SHARED_GGML_LIB BUILD_SHARED_LIBS) +sd_option_depr(WARNING SD_CUDA GGML_CUDA) +sd_option_depr(WARNING SD_METAL GGML_METAL) +sd_option_depr(WARNING SD_VULKAN GGML_VULKAN) +sd_option_depr(WARNING SD_OPENCL GGML_OPENCL) +sd_option_depr(WARNING SD_SYCL GGML_SYCL) +sd_option_depr(WARNING SD_MUSA GGML_MUSA) + set(SD_LIB stable-diffusion) file(GLOB SD_LIB_SOURCES @@ -119,29 +145,9 @@ set_property( SDCPP_BUILD_COMMIT=${SDCPP_BUILD_COMMIT} SDCPP_BUILD_VERSION=${SDCPP_BUILD_VERSION} ) -if(SD_BUILD_SHARED_LIBS) - message("-- Build shared library") - message(${SD_LIB_SOURCES}) - if(NOT SD_BUILD_SHARED_GGML_LIB) - set(BUILD_SHARED_LIBS OFF) - endif() - add_library(${SD_LIB} SHARED ${SD_LIB_SOURCES}) - add_definitions(-DSD_BUILD_SHARED_LIB) - target_compile_definitions(${SD_LIB} PRIVATE -DSD_BUILD_DLL) - set(CMAKE_POSITION_INDEPENDENT_CODE ON) -else() - message("-- Build static library") - if(NOT SD_BUILD_SHARED_GGML_LIB) - set(BUILD_SHARED_LIBS OFF) - endif() - add_library(${SD_LIB} STATIC ${SD_LIB_SOURCES}) -endif() - -if(SD_SYCL) - message("-- Use SYCL as backend stable-diffusion") - set(GGML_SYCL ON) +# Is this needed? 
+if(GGML_SYCL) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-narrowing -fsycl") - add_definitions(-DSD_USE_SYCL) # disable fast-math on host, see: # https://www.intel.com/content/www/us/en/docs/cpp-compiler/developer-guide-reference/2021-10/fp-model-fp.html if (WIN32) @@ -175,18 +181,52 @@ if (NOT TARGET ggml) endif() add_subdirectory(thirdparty) - -target_link_libraries(${SD_LIB} PUBLIC ggml zip) -target_include_directories(${SD_LIB} PUBLIC . include) -target_include_directories(${SD_LIB} PUBLIC . thirdparty) -target_compile_features(${SD_LIB} PUBLIC c_std_11 cxx_std_17) - +add_subdirectory(src) if (SD_BUILD_EXAMPLES) add_subdirectory(examples) endif() -set(SD_PUBLIC_HEADERS include/stable-diffusion.h) -set_target_properties(${SD_LIB} PROPERTIES PUBLIC_HEADER "${SD_PUBLIC_HEADERS}") +include(GNUInstallDirs) +include(CMakePackageConfigHelpers) + +set(SD_INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_INCLUDEDIR} CACHE PATH "Location of header files") +set(SD_LIB_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR} CACHE PATH "Location of library files") +set(SD_BIN_INSTALL_DIR ${CMAKE_INSTALL_BINDIR} CACHE PATH "Location of binary files") +set(SD_PUBLIC_HEADERS + ${CMAKE_CURRENT_SOURCE_DIR}/include/stable-diffusion.h +) -install(TARGETS ${SD_LIB} LIBRARY PUBLIC_HEADER) +set_target_properties(${SD_LIB} + PROPERTIES + PUBLIC_HEADER "${SD_PUBLIC_HEADERS}") + +install(TARGETS ${SD_LIB} + RUNTIME DESTINATION ${SD_BIN_INSTALL_DIR} + LIBRARY DESTINATION ${SD_LIB_INSTALL_DIR} + ARCHIVE DESTINATION ${SD_LIB_INSTALL_DIR} + PUBLIC_HEADER DESTINATION ${SD_INCLUDE_INSTALL_DIR}) + +configure_package_config_file( + ${CMAKE_CURRENT_SOURCE_DIR}/cmake/sd-config.cmake.in + ${CMAKE_CURRENT_BINARY_DIR}/sd-config.cmake + INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/sd + PATH_VARS SD_INCLUDE_INSTALL_DIR + SD_LIB_INSTALL_DIR + SD_BIN_INSTALL_DIR ) + +write_basic_package_version_file( + ${CMAKE_CURRENT_BINARY_DIR}/sd-version.cmake + VERSION ${SD_INSTALL_VERSION} + COMPATIBILITY SameMajorVersion) + +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/sd-config.cmake + ${CMAKE_CURRENT_BINARY_DIR}/sd-version.cmake + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/sd) + +configure_file(cmake/sd.pc.in + "${CMAKE_CURRENT_BINARY_DIR}/sd.pc" + @ONLY) + +install(FILES "${CMAKE_CURRENT_BINARY_DIR}/sd.pc" + DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig) diff --git a/Dockerfile.cuda b/Dockerfile.cuda index 4deb72477..745d12559 100644 --- a/Dockerfile.cuda +++ b/Dockerfile.cuda @@ -10,7 +10,7 @@ WORKDIR /sd.cpp COPY . . ARG CUDACXX=/usr/local/cuda/bin/nvcc -RUN cmake . -B ./build -DSD_CUDA=ON +RUN cmake . -B ./build -DGGML_CUDA=ON RUN cmake --build ./build --config Release -j$(nproc) FROM nvidia/cuda:${CUDA_VERSION}-cudnn-runtime-ubuntu${UBUNTU_VERSION} AS runtime diff --git a/Dockerfile.musa b/Dockerfile.musa index 2d95f817f..407e7fc5c 100644 --- a/Dockerfile.musa +++ b/Dockerfile.musa @@ -13,7 +13,7 @@ RUN mkdir build && cd build && \ cmake .. -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ \ -DCMAKE_C_FLAGS="${CMAKE_C_FLAGS} -fopenmp -I/usr/lib/llvm-14/lib/clang/14.0.0/include -L/usr/lib/llvm-14/lib" \ -DCMAKE_CXX_FLAGS="${CMAKE_CXX_FLAGS} -fopenmp -I/usr/lib/llvm-14/lib/clang/14.0.0/include -L/usr/lib/llvm-14/lib" \ - -DSD_MUSA=ON -DCMAKE_BUILD_TYPE=Release && \ + -DGGML_MUSA=ON -DCMAKE_BUILD_TYPE=Release && \ cmake --build . 
--config Release FROM mthreads/musa:${MUSA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}-amd64 as runtime diff --git a/Dockerfile.sycl b/Dockerfile.sycl index 466d5517c..24d7b2d16 100644 --- a/Dockerfile.sycl +++ b/Dockerfile.sycl @@ -9,7 +9,7 @@ WORKDIR /sd.cpp COPY . . RUN mkdir build && cd build && \ - cmake .. -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DSD_SYCL=ON -DCMAKE_BUILD_TYPE=Release && \ + cmake .. -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL=ON -DCMAKE_BUILD_TYPE=Release && \ cmake --build . --config Release -j$(nproc) FROM intel/oneapi-basekit:${SYCL_VERSION}-devel-ubuntu24.04 AS runtime diff --git a/Dockerfile.vulkan b/Dockerfile.vulkan index 5ba6cb05d..5c85fa1ad 100644 --- a/Dockerfile.vulkan +++ b/Dockerfile.vulkan @@ -8,7 +8,7 @@ WORKDIR /sd.cpp COPY . . -RUN cmake . -B ./build -DSD_VULKAN=ON +RUN cmake . -B ./build -DGGML_VULKAN=ON RUN cmake --build ./build --config Release --parallel FROM ubuntu:$UBUNTU_VERSION AS runtime diff --git a/cmake/build-info.cmake b/cmake/build-info.cmake new file mode 100644 index 000000000..3194f8159 --- /dev/null +++ b/cmake/build-info.cmake @@ -0,0 +1,48 @@ +set(BUILD_NUMBER 0) +set(BUILD_COMMIT "unknown") +set(BUILD_COMPILER "unknown") +set(BUILD_TARGET "unknown") + +# Look for git +find_package(Git) +if(NOT Git_FOUND) + find_program(GIT_EXECUTABLE NAMES git git.exe) + if(GIT_EXECUTABLE) + set(Git_FOUND TRUE) + message(STATUS "Found Git: ${GIT_EXECUTABLE}") + else() + message(WARNING "Git not found. Build info will not be accurate.") + endif() +endif() + +# Get the commit count and hash +if(Git_FOUND) + execute_process( + COMMAND ${GIT_EXECUTABLE} rev-parse --short HEAD + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + OUTPUT_VARIABLE HEAD + OUTPUT_STRIP_TRAILING_WHITESPACE + RESULT_VARIABLE RES + ) + if (RES EQUAL 0) + set(BUILD_COMMIT ${HEAD}) + endif() + execute_process( + COMMAND ${GIT_EXECUTABLE} rev-list --count HEAD + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + OUTPUT_VARIABLE COUNT + OUTPUT_STRIP_TRAILING_WHITESPACE + RESULT_VARIABLE RES + ) + if (RES EQUAL 0) + set(BUILD_NUMBER ${COUNT}) + endif() +endif() + +set(BUILD_COMPILER "${CMAKE_C_COMPILER_ID} ${CMAKE_C_COMPILER_VERSION}") + +if(CMAKE_VS_PLATFORM_NAME) + set(BUILD_TARGET ${CMAKE_VS_PLATFORM_NAME}) +else() + set(BUILD_TARGET "${CMAKE_SYSTEM_NAME} ${CMAKE_SYSTEM_PROCESSOR}") +endif() \ No newline at end of file diff --git a/cmake/common.cmake b/cmake/common.cmake new file mode 100644 index 000000000..9176dce60 --- /dev/null +++ b/cmake/common.cmake @@ -0,0 +1,60 @@ +include("ggml/cmake/common.cmake") + +# https://github.com/ggml-org/llama.cpp/blob/master/cmake/common.cmake + +function(sd_add_compile_flags) + if (SD_FATAL_WARNINGS) + if (CMAKE_CXX_COMPILER_ID MATCHES "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + list(APPEND C_FLAGS -Werror) + list(APPEND CXX_FLAGS -Werror) + elseif (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + add_compile_options(/WX) + endif() + endif() + + if (SD_ALL_WARNINGS) + if (NOT MSVC) + list(APPEND C_FLAGS -Wshadow -Wstrict-prototypes -Wpointer-arith -Wmissing-prototypes + -Werror=implicit-int -Werror=implicit-function-declaration) + + list(APPEND CXX_FLAGS -Wmissing-declarations -Wmissing-noreturn) + + list(APPEND WARNING_FLAGS -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function) + + list(APPEND C_FLAGS ${WARNING_FLAGS}) + list(APPEND CXX_FLAGS ${WARNING_FLAGS}) + + ggml_get_flags(${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION}) + + add_compile_options("$<$:${C_FLAGS};${GF_C_FLAGS}>" + 
"$<$:${CXX_FLAGS};${GF_CXX_FLAGS}>") + else() + # todo : msvc + set(C_FLAGS "" PARENT_SCOPE) + set(CXX_FLAGS "" PARENT_SCOPE) + endif() + endif() + + if (NOT MSVC) + if (SD_SANITIZE_THREAD) + message(STATUS "Using -fsanitize=thread") + + add_compile_options(-fsanitize=thread) + link_libraries (-fsanitize=thread) + endif() + + if (SD_SANITIZE_ADDRESS) + message(STATUS "Using -fsanitize=address") + + add_compile_options(-fsanitize=address -fno-omit-frame-pointer) + link_libraries (-fsanitize=address) + endif() + + if (SD_SANITIZE_UNDEFINED) + message(STATUS "Using -fsanitize=undefined") + + add_compile_options(-fsanitize=undefined) + link_libraries (-fsanitize=undefined) + endif() + endif() +endfunction() \ No newline at end of file diff --git a/cmake/sd-config.cmake.in b/cmake/sd-config.cmake.in new file mode 100644 index 000000000..7b224924e --- /dev/null +++ b/cmake/sd-config.cmake.in @@ -0,0 +1,30 @@ +set(SD_VERSION @SD_INSTALL_VERSION@) +set(SD_BUILD_COMMIT @SD_BUILD_COMMIT@) +set(SD_BUILD_NUMBER @SD_BUILD_NUMBER@) +set(SD_SHARED_LIB @BUILD_SHARED_LIBS@) + +@PACKAGE_INIT@ + +set_and_check(SD_INCLUDE_DIR "@PACKAGE_SD_INCLUDE_INSTALL_DIR@") +set_and_check(SD_LIB_DIR "@PACKAGE_SD_LIB_INSTALL_DIR@") +set_and_check(SD_BIN_DIR "@PACKAGE_SD_BIN_INSTALL_DIR@") + +find_package(ggml REQUIRED HINTS ${SD_LIB_DIR}/cmake) + +find_library(stable-diffusion_LIBRARY stable-diffusion + REQUIRED + HINTS ${SD_LIB_DIR} + NO_CMAKE_FIND_ROOT_PATH +) + +add_library(stable-diffusion UNKNOWN IMPORTED) +set_target_properties(stable-diffusion + PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${SD_INCLUDE_DIR}" + INTERFACE_LINK_LIBRARIES "ggml::ggml;ggml::ggml-base;" + IMPORTED_LINK_INTERFACE_LANGUAGES "CXX" + IMPORTED_LOCATION "${stable-diffusion_LIBRARY}" + INTERFACE_COMPILE_FEATURES "c_std_11;cxx_std_17" + POSITION_INDEPENDENT_CODE ON) + +check_required_components(Stable-diffusion) \ No newline at end of file diff --git a/cmake/sd.pc.in b/cmake/sd.pc.in new file mode 100644 index 000000000..f0a5ebc7d --- /dev/null +++ b/cmake/sd.pc.in @@ -0,0 +1,10 @@ +prefix=@CMAKE_INSTALL_PREFIX@ +exec_prefix=@CMAKE_INSTALL_PREFIX@ +libdir=@CMAKE_INSTALL_FULL_LIBDIR@ +includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@ + +Name: stable-diffusion +Description: Diffusion model(SD,Flux,Wan,Qwen Image,Z-Image,...) inference in pure C/C++ +Version: @SD_INSTALL_VERSION@ +Libs: -L${libdir} -lggml -lggml-base -lstable-diffusion +Cflags: -I${includedir} \ No newline at end of file diff --git a/docs/rpc.md b/docs/rpc.md new file mode 100644 index 000000000..2f4e92282 --- /dev/null +++ b/docs/rpc.md @@ -0,0 +1,220 @@ +# Building and Using the RPC Server with `stable-diffusion.cpp` + +This guide covers how to build a version of [the RPC server from `llama.cpp`](https://github.com/ggml-org/llama.cpp/blob/master/tools/rpc/README.md) that is compatible with your version of `stable-diffusion.cpp` to manage multi-backends setups. RPC allows you to offload specific model components to a remote server. + +> **Note on Model Location:** The model files (e.g., `.safetensors` or `.gguf`) remain on the **Client** machine. The client parses the file and transmits the necessary tensor data and computational graphs to the server. The server does not need to store the model files locally. + +## 1. Building `stable-diffusion.cpp` with RPC client + +First, you should build the client application from source. It requires `SD_RPC=ON` to include the RPC backend to your client. + +```bash +mkdir build +cd build +cmake .. 
+
+## 3. Build `llama.cpp` (RPC Server)
+
+The RPC server acts as the worker. You must explicitly enable the **backend** (the hardware interface, such as CUDA for Nvidia, Metal for Apple Silicon, or Vulkan) when building; otherwise the server will default to using only the CPU.
+
+To find the correct flags for your system, refer to the official build documentation of the [`llama.cpp`](https://github.com/ggml-org/llama.cpp/blob/master/docs/build.md) repository.
+
+> **Crucial:** You must include the compiler flags required for API compatibility with `stable-diffusion.cpp` (`-DGGML_MAX_NAME=128`). Without this flag, `GGML_MAX_NAME` will default to `64` on the server, and data transfers between the client and server will fail. Of course, `-DGGML_RPC` must also be enabled.
+>
+> I recommend disabling the `LLAMA_CURL` flag to avoid unnecessary dependencies, and disabling shared library builds to avoid potential conflicts.
+
+> **Build Target:** We are specifically building the `rpc-server` target. This prevents the build system from compiling the entire `llama.cpp` suite (like `llama-server`), making the build significantly faster.
+
+### Linux / WSL (Vulkan)
+
+```bash
+mkdir build
+cd build
+# make sure the backend you need is enabled (here: Vulkan)
+cmake .. -DGGML_RPC=ON \
+    -DGGML_VULKAN=ON \
+    -DGGML_BUILD_SHARED_LIBS=OFF \
+    -DLLAMA_CURL=OFF \
+    -DCMAKE_C_FLAGS=-DGGML_MAX_NAME=128 \
+    -DCMAKE_CXX_FLAGS=-DGGML_MAX_NAME=128
+cmake --build . --config Release --target rpc-server -j $(nproc)
+```
+
+### macOS (Metal)
+
+```bash
+mkdir build
+cd build
+cmake .. -DGGML_RPC=ON \
+    -DGGML_METAL=ON \
+    -DGGML_BUILD_SHARED_LIBS=OFF \
+    -DLLAMA_CURL=OFF \
+    -DCMAKE_C_FLAGS=-DGGML_MAX_NAME=128 \
+    -DCMAKE_CXX_FLAGS=-DGGML_MAX_NAME=128
+cmake --build . --config Release --target rpc-server
+```
+
+### Windows (Visual Studio 2022, Vulkan)
+
+```powershell
+mkdir build
+cd build
+cmake .. -G "Visual Studio 17 2022" -A x64 `
+    -DGGML_RPC=ON `
+    -DGGML_VULKAN=ON `
+    -DGGML_BUILD_SHARED_LIBS=OFF `
+    -DLLAMA_CURL=OFF `
+    -DCMAKE_C_FLAGS=-DGGML_MAX_NAME=128 `
+    -DCMAKE_CXX_FLAGS=-DGGML_MAX_NAME=128
+cmake --build . --config Release --target rpc-server
+```
+
+## 4. Usage
+
+Once both applications are built, you can run the server and the client to manage your GPU allocation.
+
+### Step A: Run the RPC Server
+
+Start the server. It listens for connections on the default address (usually `localhost:50052`). If your server is on a different machine, ensure the server binds to the correct interface and your firewall allows the connection.
+
+**On the Server:**
+If running on the same machine, you can use the default address:
+
+```bash
+./rpc-server
+```
+
+If you want to allow connections from other machines on the network:
+
+```bash
+./rpc-server --host 0.0.0.0
+```
+
+> **Security Warning:** The RPC server does not currently support authentication or encryption. **Only run the server on trusted local networks**. Never expose the RPC server directly to the open internet.
+
+> **Drivers & Hardware:** Ensure the server machine has the necessary drivers installed and functional (e.g., Nvidia drivers for CUDA, the Vulkan SDK, or Metal). If no devices are found, the server will simply fall back to the CPU.
+
+### Step B: Check that the client can connect to the server and see the available devices
+
+This assumes the server is running on your local machine and listening on the default port `50052`. If it is running on a different machine, replace `localhost` with the server's IP address.
+
+**On the Client:**
+
+```bash
+./sd-cli --rpc localhost:50052 --list-devices
+```
+
+If the server is running and the client is able to connect, you should see `RPC0 localhost:50052` in the list of devices.
+
+Example output:
+(Client built without GPU acceleration, two GPUs available on the server)
+
+```
+List of available GGML devices:
+Name    Description
+-------------------
+CPU     AMD Ryzen 9 5900X 12-Core Processor
+RPC0    localhost:50052
+RPC1    localhost:50052
+```
+
+### Step C: Run with an RPC device
+
+If everything is working correctly, you can now run the client while offloading some or all of the work to the RPC server.
+
+Example: setting the main backend to the RPC0 device, so that all the work is done on the server.
+
+```bash
+./sd-cli -m models/sd1.5.safetensors -p "A cat" --rpc localhost:50052 --main-backend-device RPC0
+```
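+
+You can also split the work across devices with the per-component flags documented in the CLI README (a sketch; the model path is just an example, and device names should match what `--list-devices` reports on your machine):
+
+```bash
+# run the diffusion model on the remote GPU, keep the text encoders and VAE local
+./sd-cli -m models/sd1.5.safetensors -p "A cat" \
+    --rpc localhost:50052 \
+    --diffusion-backend-device RPC0 \
+    --clip-backend-device CPU \
+    --vae-backend-device CPU
+```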
-G "Visual Studio 17 2022" -A x64 ` + -DGGML_RPC=ON ` + -DGGML_VULKAN=ON ` + -DGGML_BUILD_SHARED_LIBS=OFF ` + -DLLAMA_CURL=OFF ` + -DCMAKE_C_FLAGS=-DGGML_MAX_NAME=128 ` + -DCMAKE_CXX_FLAGS=-DGGML_MAX_NAME=128 +cmake --build . --config Release --target rpc-server +``` + +## 4. Usage + +Once both applications are built, you can run the server and the client to manage your GPU allocation. + +### Step A: Run the RPC Server + +Start the server. It listens for connections on the default address (usually `localhost:50052`). If your server is on a different machine, ensure the server binds to the correct interface and your firewall allows the connection. + +**On the Server :** +If running on the same machine, you can use the default address: + +```bash +./rpc-server +``` + +If you want to allow connections from other machines on the network: + +```bash +./rpc-server --host 0.0.0.0 +``` + +> **Security Warning:** The RPC server does not currently support authentication or encryption. **Only run the server on trusted local networks**. Never expose the RPC server directly to the open internet. + +> **Drivers & Hardware:** Ensure the Server machine has the necessary drivers installed and functional (e.g., Nvidia Drivers for CUDA, Vulkan SDK, or Metal). If no devices are found, the server will simply fallback to CPU usage. + +### Step B: Check if the client is able to connect to the server and see the available devices + +We're assuming the server is running on your local machine, and listening on the default port `50052`. If it's running on a different machine, you can replace `localhost` with the IP address of the server. + +**On the Client:** + +```bash +./sd-cli --rpc localhost:50052 --list-devices +``` + +If the server is running and the client is able to connect, you should see `RPC0 localhost:50052` in the list of devices. + +Example output: +(Client built without GPU acceleration, two GPUs available on the server) + +``` +List of available GGML devices: +Name Description +------------------- +CPU AMD Ryzen 9 5900X 12-Core Processor +RPC0 localhost:50052 +RPC1 localhost:50052 +``` + +### Step C: Run with RPC device + +If everything is working correctly, you can now run the client while offloading some or all of the work to the RPC server. + +Example: Setting the main backend to the RPC0 device for doing all the work on the server. + +```bash +./sd-cli -m models/sd1.5.safetensors -p "A cat" --rpc localhost:50052 --main-backend-device RPC0 +``` + +--- + +## 5. Scaling: Multiple RPC Servers + +You can connect the client to multiple RPC servers simultaneously to scale out your hardware usage. + +Example: A main machine (192.168.1.10) with 3 GPUs, with one GPU running CUDA and the other two running Vulkan, and a second machine (192.168.1.11) only one GPU. + +**On the first machine (Running two server instances):** + +**Terminal 1 (CUDA):** + +```bash +# Linux / WSL +export CUDA_VISIBLE_DEVICES=0 +cd ./build_cuda/bin/Release +./rpc-server --host 0.0.0.0 + +# Windows PowerShell +$env:CUDA_VISIBLE_DEVICES="0" +cd .\build_cuda\bin\Release +./rpc-server --host 0.0.0.0 +``` + +**Terminal 2 (Vulkan):** + +```bash +cd ./build_vulkan/bin/Release +# ignore the first GPU (used by CUDA server) +./rpc-server --host 0.0.0.0 --port 50053 -d Vulkan1,Vulkan2 +``` + +**On the second machine:** + +```bash +cd ./build/bin/Release +./rpc-server --host 0.0.0.0 +``` + +**On the Client:** +Pass multiple server addresses separated by commas. 
+
+---
+
+## 6. Performance Considerations
+
+RPC performance depends heavily on network bandwidth, since large weights and activations must be transferred back and forth over the network, especially for large models or high resolutions. For best results, ensure your network connection is stable and has sufficient bandwidth (>1Gbps recommended).
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index 2dcd1d53a..29cef50fa 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -1,4 +1,7 @@
+sd_add_compile_flags()
+
 include_directories(${CMAKE_CURRENT_SOURCE_DIR})
+include_directories(${PROJECT_SOURCE_DIR}/thirdparty)
 
 add_subdirectory(cli)
-add_subdirectory(server)
\ No newline at end of file
+add_subdirectory(server)
diff --git a/examples/cli/README.md b/examples/cli/README.md
index 904f3c441..38565cf9b 100644
--- a/examples/cli/README.md
+++ b/examples/cli/README.md
@@ -19,6 +19,8 @@ CLI Options:
 -M, --mode                         run mode, one of [img_gen, vid_gen, upscale, convert], default: img_gen
     --preview                      preview method. must be one of the following [none, proj, tae, vae] (default is none)
 -h, --help                         show this help message and exit
+    --rpc                          add a rpc device
+    --list-devices                 list available ggml compute devices
 
 Context Options:
 -m, --model                        path to full model
@@ -41,6 +43,17 @@ Context Options:
     --tensor-type-rules            weight type per tensor pattern (example: "^vae\.=f16,model\.=q8_0")
     --photo-maker                  path to PHOTOMAKER model
     --upscale-model                path to esrgan model.
+    --main-backend-device          default device to use for all backends (defaults to main gpu device if hardware acceleration is available, otherwise
+                                   cpu)
+    --diffusion-backend-device     device to use for diffusion (defaults to main-backend-device)
+    --clip-backend-device          device to use for clip (defaults to main-backend-device). Can be a comma-separated list of devices for models with
+                                   multiple encoders
+    --vae-backend-device           device to use for vae (defaults to main-backend-device). Also applies to tae, unless tae-backend-device is specified
+    --tae-backend-device           device to use for tae (defaults to vae-backend-device)
+    --control-net-backend-device   device to use for control net (defaults to main-backend-device)
+    --upscaler-backend-device      device to use for upscaling models (defaults to main-backend-device)
+    --photomaker-backend-device    device to use for photomaker (defaults to main-backend-device)
+    --vision-backend-device        device to use for clip-vision model (defaults to main-backend-device)
 -t, --threads                      number of threads to use during computation (default: -1).
If threads <= 0, then threads will be set to the number of CPU physical cores --chroma-t5-mask-pad t5 mask pad size of chroma @@ -49,9 +62,6 @@ Context Options: --force-sdxl-vae-conv-scale force use of conv scale on sdxl vae --offload-to-cpu place the weights in RAM to save VRAM, and automatically load them into VRAM when needed --mmap whether to memory-map model - --control-net-cpu keep controlnet in cpu (for low vram) - --clip-on-cpu keep clip in cpu (for low vram) - --vae-on-cpu keep vae in cpu (for low vram) --fa use flash attention --diffusion-fa use flash attention in the diffusion model only --diffusion-conv-direct use ggml_conv2d_direct in the diffusion model diff --git a/examples/cli/main.cpp b/examples/cli/main.cpp index f9e4928ea..55aa3ed91 100644 --- a/examples/cli/main.cpp +++ b/examples/cli/main.cpp @@ -12,6 +12,8 @@ #include #include +#include "ggml.h" + // #include "preprocessing.hpp" #include "stable-diffusion.h" @@ -46,6 +48,7 @@ struct SDCliParams { bool color = false; bool normal_exit = false; + bool skip_usage = false; ArgOptions get_options() { ArgOptions options; @@ -143,7 +146,28 @@ struct SDCliParams { auto on_help_arg = [&](int argc, const char** argv, int index) { normal_exit = true; - return -1; + return VALID_BREAK_OPT; + }; + + auto on_rpc_arg = [&](int argc, const char** argv, int index) { + if (++index >= argc) { + return -1; + } + const char* rpc_device = argv[index]; + add_rpc_device(rpc_device); + return 1; + }; + + auto on_list_devices_arg = [&](int argc, const char** argv, int index) { + size_t buff_size = backend_list_size(); + GGML_ASSERT(buff_size > 0); + char* buff = (char*)malloc(buff_size); + list_backends_to_buffer(buff, buff_size); + printf("List of available GGML devices:\nName\tDescription\n-------------------\n%s\n", buff); + free(buff); + normal_exit = true; + skip_usage = true; + return VALID_BREAK_OPT; }; options.manual_options = { @@ -159,6 +183,14 @@ struct SDCliParams { "--help", "show this help message and exit", on_help_arg}, + {"", + "--rpc", + "add a rpc device", + on_rpc_arg}, + {"", + "--list-devices", + "list available ggml compute devices", + on_list_devices_arg}, }; return options; @@ -213,7 +245,9 @@ void parse_args(int argc, const char** argv, SDCliParams& cli_params, SDContextP std::vector options_vec = {cli_params.get_options(), ctx_params.get_options(), gen_params.get_options()}; if (!parse_options(argc, argv, options_vec)) { - print_usage(argc, argv, options_vec); + if (!cli_params.skip_usage) { + print_usage(argc, argv, options_vec); + } exit(cli_params.normal_exit ? 0 : 1); } @@ -798,7 +832,8 @@ int main(int argc, const char* argv[]) { ctx_params.offload_params_to_cpu, ctx_params.diffusion_conv_direct, ctx_params.n_threads, - gen_params.upscale_tile_size); + gen_params.upscale_tile_size, + ctx_params.upscaler_backend_device.c_str()); if (upscaler_ctx == nullptr) { LOG_ERROR("new_upscaler_ctx failed"); diff --git a/examples/common/common.hpp b/examples/common/common.hpp index 9389b03a3..64d3e026f 100644 --- a/examples/common/common.hpp +++ b/examples/common/common.hpp @@ -34,6 +34,8 @@ namespace fs = std::filesystem; #define SAFE_STR(s) ((s) ? (s) : "") #define BOOL_STR(b) ((b) ? 
"true" : "false") +#define VALID_BREAK_OPT -42 + const char* modes_str[] = { "img_gen", "vid_gen", @@ -401,16 +403,26 @@ static bool parse_options(int argc, const char** argv, const std::vector embedding_map; std::vector embedding_vec; @@ -454,9 +476,6 @@ struct SDContextParams { rng_type_t sampler_rng_type = RNG_TYPE_COUNT; bool offload_params_to_cpu = false; bool enable_mmap = false; - bool control_net_cpu = false; - bool clip_on_cpu = false; - bool vae_on_cpu = false; bool flash_attn = false; bool diffusion_flash_attn = false; bool diffusion_conv_direct = false; @@ -562,6 +581,43 @@ struct SDContextParams { "--upscale-model", "path to esrgan model.", &esrgan_path}, + {"", + "--main-backend-device", + "default device to use for all backends (defaults to main gpu device if hardware acceleration is available, otherwise cpu)", + &main_backend_device}, + {"", + "--diffusion-backend-device", + "device to use for diffusion (defaults to main-backend-device)", + &diffusion_backend_device}, + {"", + "--clip-backend-device", + "device to use for clip (defaults to main-backend-device). Can be a comma-separated list of devices for models with multiple encoders", + &clip_backend_device}, + {"", + "--vae-backend-device", + "device to use for vae (defaults to main-backend-device). Also applies to tae, unless tae-backend-device is specified", + &vae_backend_device}, + {"", + "--tae-backend-device", + "device to use for tae (defaults to vae-backend-device)", + &tae_backend_device}, + {"", + "--control-net-backend-device", + "device to use for control net (defaults to main-backend-device)", + &control_net_backend_device}, + {"", + "--upscaler-backend-device", + "device to use for upscaling models (defaults to main-backend-device)", + &upscaler_backend_device}, + {"", + "--photomaker-backend-device", + "device to use for photomaker (defaults to main-backend-device)", + &photomaker_backend_device}, + {"", + "--vision-backend-device", + "device to use for clip-vision model (defaults to main-backend-device)", + &vision_backend_device}, + }; options.int_options = { @@ -600,18 +656,6 @@ struct SDContextParams { "--mmap", "whether to memory-map model", true, &enable_mmap}, - {"", - "--control-net-cpu", - "keep controlnet in cpu (for low vram)", - true, &control_net_cpu}, - {"", - "--clip-on-cpu", - "keep clip in cpu (for low vram)", - true, &clip_on_cpu}, - {"", - "--vae-on-cpu", - "keep vae in cpu (for low vram)", - true, &vae_on_cpu}, {"", "--fa", "use flash attention", @@ -876,6 +920,7 @@ struct SDContextParams { std::string embeddings_str = emb_ss.str(); std::ostringstream oss; + // TODO backend devices oss << "SDContextParams {\n" << " n_threads: " << n_threads << ",\n" << " model_path: \"" << model_path << "\",\n" @@ -901,9 +946,9 @@ struct SDContextParams { << " sampler_rng_type: " << sd_rng_type_name(sampler_rng_type) << ",\n" << " offload_params_to_cpu: " << (offload_params_to_cpu ? "true" : "false") << ",\n" << " enable_mmap: " << (enable_mmap ? "true" : "false") << ",\n" - << " control_net_cpu: " << (control_net_cpu ? "true" : "false") << ",\n" - << " clip_on_cpu: " << (clip_on_cpu ? "true" : "false") << ",\n" - << " vae_on_cpu: " << (vae_on_cpu ? "true" : "false") << ",\n" + // << " control_net_cpu: " << (control_net_cpu ? "true" : "false") << ",\n" + // << " clip_on_cpu: " << (clip_on_cpu ? "true" : "false") << ",\n" + // << " vae_on_cpu: " << (vae_on_cpu ? "true" : "false") << ",\n" << " flash_attn: " << (flash_attn ? 
"true" : "false") << ",\n" << " diffusion_flash_attn: " << (diffusion_flash_attn ? "true" : "false") << ",\n" << " diffusion_conv_direct: " << (diffusion_conv_direct ? "true" : "false") << ",\n" @@ -966,9 +1011,6 @@ struct SDContextParams { lora_apply_mode, offload_params_to_cpu, enable_mmap, - clip_on_cpu, - control_net_cpu, - vae_on_cpu, flash_attn, diffusion_flash_attn, taesd_preview, @@ -981,6 +1023,14 @@ struct SDContextParams { chroma_use_t5_mask, chroma_t5_mask_pad, qwen_image_zero_cond_t, + main_backend_device.c_str(), + diffusion_backend_device.c_str(), + clip_backend_device.c_str(), + vae_backend_device.c_str(), + tae_backend_device.c_str(), + control_net_backend_device.c_str(), + photomaker_backend_device.c_str(), + vision_backend_device.c_str(), }; return sd_ctx_params; } diff --git a/examples/server/README.md b/examples/server/README.md index 8aa2158f5..5a44e6c4b 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -121,6 +121,17 @@ Context Options: --tensor-type-rules weight type per tensor pattern (example: "^vae\.=f16,model\.=q8_0") --photo-maker path to PHOTOMAKER model --upscale-model path to esrgan model. + --main-backend-device default device to use for all backends (defaults to main gpu device if hardware acceleration is available, otherwise + cpu) + --diffusion-backend-device device to use for diffusion (defaults to main-backend-device) + --clip-backend-device device to use for clip (defaults to main-backend-device). Can be a comma-separated list of devices for models with + multiple encoders + --vae-backend-device device to use for vae (defaults to main-backend-device). Also applies to tae, unless tae-backend-device is specified + --tae-backend-device device to use for tae (defaults to vae-backend-device) + --control-net-backend-device device to use for control net (defaults to main-backend-device) + --upscaler-backend-device device to use for upscaling models (defaults to main-backend-device) + --photomaker-backend-device device to use for photomaker (defaults to main-backend-device) + --vision-backend-device device to use for clip-vision model (defaults to main-backend-device) -t, --threads number of threads to use during computation (default: -1). 
If threads <= 0, then threads will be set to the number of CPU physical cores --chroma-t5-mask-pad t5 mask pad size of chroma @@ -129,9 +140,6 @@ Context Options: --force-sdxl-vae-conv-scale force use of conv scale on sdxl vae --offload-to-cpu place the weights in RAM to save VRAM, and automatically load them into VRAM when needed --mmap whether to memory-map model - --control-net-cpu keep controlnet in cpu (for low vram) - --clip-on-cpu keep clip in cpu (for low vram) - --vae-on-cpu keep vae in cpu (for low vram) --fa use flash attention --diffusion-fa use flash attention in the diffusion model only --diffusion-conv-direct use ggml_conv2d_direct in the diffusion model diff --git a/examples/server/main.cpp b/examples/server/main.cpp index 6e4340a61..156ba7c41 100644 --- a/examples/server/main.cpp +++ b/examples/server/main.cpp @@ -909,9 +909,10 @@ int main(int argc, const char** argv) { } } - auto get_sample_method = [](std::string name) -> enum sample_method_t { + auto get_sample_method = [](std::string name)->enum sample_method_t { enum sample_method_t result = str_to_sample_method(name.c_str()); - if (result != SAMPLE_METHOD_COUNT) return result; + if (result != SAMPLE_METHOD_COUNT) + return result; // some applications use a hardcoded sampler list std::transform(name.begin(), name.end(), name.begin(), [](unsigned char c) { return std::tolower(c); }); @@ -932,8 +933,9 @@ int main(int argc, const char** argv) { {"k_res_multistep", RES_MULTISTEP_SAMPLE_METHOD}, {"res 2s", RES_2S_SAMPLE_METHOD}, {"k_res_2s", RES_2S_SAMPLE_METHOD}}; - auto it = hardcoded.find(name); - if (it != hardcoded.end()) return it->second; + auto it = hardcoded.find(name); + if (it != hardcoded.end()) + return it->second; return SAMPLE_METHOD_COUNT; }; diff --git a/format-code.ps1 b/format-code.ps1 new file mode 100644 index 000000000..7f6d00727 --- /dev/null +++ b/format-code.ps1 @@ -0,0 +1,54 @@ +param( + [switch]$DryRun +) + +$ErrorActionPreference = "Stop" + +$repoRoot = $PSScriptRoot +if (-not $repoRoot) { + $repoRoot = (Get-Location).Path +} + +$patterns = @( + "src/*.cpp" + "src/*.h" + "src/*.hpp" + "src/vocab/*.h" + "src/vocab/*.cpp" + "examples/cli/*.cpp" + "examples/common/*.hpp" + "examples/cli/*.h" + "examples/server/*.cpp" +) + +Push-Location $repoRoot +try { + if (-not $DryRun) { + $null = Get-Command clang-format -ErrorAction Stop + } + + foreach ($pattern in $patterns) { + $files = Get-ChildItem -Path $pattern -File -ErrorAction SilentlyContinue | Sort-Object FullName + + foreach ($file in $files) { + $relativePath = $file.FullName.Substring($repoRoot.Length).TrimStart('\', '/') -replace '\\', '/' + + if ($relativePath -like "vocab*") { + continue + } + + Write-Host "formatting '$relativePath'" + + # if ($file.Name -ne "stable-diffusion.h") { + # clang-tidy -fix -p build_linux/ "$relativePath" + # } + + if (-not $DryRun) { + & clang-format -style=file -i $file.FullName + } + } + } +} +finally { + Pop-Location +} diff --git a/include/stable-diffusion.h b/include/stable-diffusion.h index 029c2ab1d..521875034 100644 --- a/include/stable-diffusion.h +++ b/include/stable-diffusion.h @@ -186,9 +186,9 @@ typedef struct { enum lora_apply_mode_t lora_apply_mode; bool offload_params_to_cpu; bool enable_mmap; - bool keep_clip_on_cpu; - bool keep_control_net_on_cpu; - bool keep_vae_on_cpu; + // bool keep_clip_on_cpu; + // bool keep_control_net_on_cpu; + // bool keep_vae_on_cpu; bool flash_attn; bool diffusion_flash_attn; bool tae_preview_only; @@ -201,6 +201,14 @@ typedef struct { bool chroma_use_t5_mask; int 
chroma_t5_mask_pad; bool qwen_image_zero_cond_t; + const char* main_device; + const char* diffusion_device; + const char* clip_device; + const char* vae_device; + const char* tae_device; + const char* control_net_device; + const char* photomaker_device; + const char* vision_device; } sd_ctx_params_t; typedef struct { @@ -389,7 +397,8 @@ SD_API upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path, bool offload_params_to_cpu, bool direct, int n_threads, - int tile_size); + int tile_size, + const char * device); SD_API void free_upscaler_ctx(upscaler_ctx_t* upscaler_ctx); SD_API sd_image_t upscale(upscaler_ctx_t* upscaler_ctx, @@ -415,6 +424,11 @@ SD_API bool preprocess_canny(sd_image_t image, SD_API const char* sd_commit(void); SD_API const char* sd_version(void); +SD_API size_t backend_list_size(void); +SD_API void list_backends_to_buffer(char* buffer, size_t buffer_size); + +SD_API void add_rpc_device(const char* address); + #ifdef __cplusplus } #endif diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt new file mode 100644 index 000000000..54496add4 --- /dev/null +++ b/src/CMakeLists.txt @@ -0,0 +1,35 @@ +sd_add_compile_flags() + +# +# libraries +# + +# stable-diffusion +file(GLOB SD_SOURCES "*.cpp" "*.hpp" "*.h" "*.hpp") +file(GLOB SD_VOCAB_SOURCES "vocab/*.h" "vocab/*.cpp") + +add_library(${SD_LIB} + ../include/stable-diffusion.h + ${SD_SOURCES} + ${SD_VOCAB_SOURCES} +) + +set_target_properties(${SD_LIB} PROPERTIES + VERSION ${SD_INSTALL_VERSION} + SOVERSION 0 + MACHO_CURRENT_VERSION 0 # keep macOS linker from seeing oversized version number +) + + + +target_include_directories(${SD_LIB} PRIVATE .) +target_include_directories(${SD_LIB} PUBLIC ../include) +target_compile_features(${SD_LIB} PRIVATE c_std_11 cxx_std_17) + +target_link_libraries(${SD_LIB} PUBLIC ggml PRIVATE zip) + +if (BUILD_SHARED_LIBS) + set_target_properties(${SD_LIB} PROPERTIES POSITION_INDEPENDENT_CODE ON) + target_compile_definitions(${SD_LIB} PRIVATE SD_BUILD_DLL) + target_compile_definitions(${SD_LIB} PUBLIC SD_BUILD_SHARED_LIB ) +endif() diff --git a/src/auto_encoder_kl.hpp b/src/auto_encoder_kl.hpp index 6efdb41a2..ebc7a6f6f 100644 --- a/src/auto_encoder_kl.hpp +++ b/src/auto_encoder_kl.hpp @@ -780,22 +780,22 @@ struct AutoEncoderKL : public VAE { -0.0511f, -0.0603f, -0.0478f, -0.0524f, -0.0227f, -0.0274f, -0.0154f, -0.0255f, -0.0572f, -0.0565f, -0.0518f, -0.0496f, 0.0116f, 0.0054f, 0.0163f, 0.0104f}; latents_std_vec = { - 1.8029f, 1.7786f, 1.7868f, 1.7837f, 1.7717f, 1.7590f, 1.7610f, 1.7479f, - 1.7336f, 1.7373f, 1.7340f, 1.7343f, 1.8626f, 1.8527f, 1.8629f, 1.8589f, - 1.7593f, 1.7526f, 1.7556f, 1.7583f, 1.7363f, 1.7400f, 1.7355f, 1.7394f, - 1.7342f, 1.7246f, 1.7392f, 1.7304f, 1.7551f, 1.7513f, 1.7559f, 1.7488f, - 1.8449f, 1.8454f, 1.8550f, 1.8535f, 1.8240f, 1.7813f, 1.7854f, 1.7945f, - 1.8047f, 1.7876f, 1.7695f, 1.7676f, 1.7782f, 1.7667f, 1.7925f, 1.7848f, - 1.7579f, 1.7407f, 1.7483f, 1.7368f, 1.7961f, 1.7998f, 1.7920f, 1.7925f, - 1.7780f, 1.7747f, 1.7727f, 1.7749f, 1.7526f, 1.7447f, 1.7657f, 1.7495f, - 1.7775f, 1.7720f, 1.7813f, 1.7813f, 1.8162f, 1.8013f, 1.8023f, 1.8033f, - 1.7527f, 1.7331f, 1.7563f, 1.7482f, 1.7610f, 1.7507f, 1.7681f, 1.7613f, - 1.7665f, 1.7545f, 1.7828f, 1.7726f, 1.7896f, 1.7999f, 1.7864f, 1.7760f, - 1.7613f, 1.7625f, 1.7560f, 1.7577f, 1.7783f, 1.7671f, 1.7810f, 1.7799f, - 1.7201f, 1.7068f, 1.7265f, 1.7091f, 1.7793f, 1.7578f, 1.7502f, 1.7455f, - 1.7587f, 1.7500f, 1.7525f, 1.7362f, 1.7616f, 1.7572f, 1.7444f, 1.7430f, - 1.7509f, 1.7610f, 1.7634f, 1.7612f, 1.7254f, 1.7135f, 1.7321f, 
1.7226f, - 1.7664f, 1.7624f, 1.7718f, 1.7664f, 1.7457f, 1.7441f, 1.7569f, 1.7530f}; + 1.8029f, 1.7786f, 1.7868f, 1.7837f, 1.7717f, 1.7590f, 1.7610f, 1.7479f, + 1.7336f, 1.7373f, 1.7340f, 1.7343f, 1.8626f, 1.8527f, 1.8629f, 1.8589f, + 1.7593f, 1.7526f, 1.7556f, 1.7583f, 1.7363f, 1.7400f, 1.7355f, 1.7394f, + 1.7342f, 1.7246f, 1.7392f, 1.7304f, 1.7551f, 1.7513f, 1.7559f, 1.7488f, + 1.8449f, 1.8454f, 1.8550f, 1.8535f, 1.8240f, 1.7813f, 1.7854f, 1.7945f, + 1.8047f, 1.7876f, 1.7695f, 1.7676f, 1.7782f, 1.7667f, 1.7925f, 1.7848f, + 1.7579f, 1.7407f, 1.7483f, 1.7368f, 1.7961f, 1.7998f, 1.7920f, 1.7925f, + 1.7780f, 1.7747f, 1.7727f, 1.7749f, 1.7526f, 1.7447f, 1.7657f, 1.7495f, + 1.7775f, 1.7720f, 1.7813f, 1.7813f, 1.8162f, 1.8013f, 1.8023f, 1.8033f, + 1.7527f, 1.7331f, 1.7563f, 1.7482f, 1.7610f, 1.7507f, 1.7681f, 1.7613f, + 1.7665f, 1.7545f, 1.7828f, 1.7726f, 1.7896f, 1.7999f, 1.7864f, 1.7760f, + 1.7613f, 1.7625f, 1.7560f, 1.7577f, 1.7783f, 1.7671f, 1.7810f, 1.7799f, + 1.7201f, 1.7068f, 1.7265f, 1.7091f, 1.7793f, 1.7578f, 1.7502f, 1.7455f, + 1.7587f, 1.7500f, 1.7525f, 1.7362f, 1.7616f, 1.7572f, 1.7444f, 1.7430f, + 1.7509f, 1.7610f, 1.7634f, 1.7612f, 1.7254f, 1.7135f, 1.7321f, 1.7226f, + 1.7664f, 1.7624f, 1.7718f, 1.7664f, 1.7457f, 1.7441f, 1.7569f, 1.7530f}; } else { GGML_ABORT("unknown version %d", version); } diff --git a/src/cache_dit.hpp b/src/cache_dit.hpp index 9af627fba..0d1047847 100644 --- a/src/cache_dit.hpp +++ b/src/cache_dit.hpp @@ -839,7 +839,7 @@ struct CacheDitConditionState { float* input_data = (float*)input->data; float diff = CacheDitState::calculate_residual_diff( - it->second.prev_input.data(), input_data, ne); + it->second.prev_input.data(), input_data, ne); float effective_threshold = config.residual_diff_threshold; if (config.Fn_compute_blocks > 0) { diff --git a/src/common_block.hpp b/src/common_block.hpp index 2cef389af..82e95e750 100644 --- a/src/common_block.hpp +++ b/src/common_block.hpp @@ -1,7 +1,9 @@ #ifndef __COMMON_BLOCK_HPP__ #define __COMMON_BLOCK_HPP__ +#include "ggml-backend.h" #include "ggml_extend.hpp" +#include "util.h" class DownSampleBlock : public GGMLBlock { protected: @@ -248,9 +250,6 @@ class FeedForward : public GGMLBlock { float scale = 1.f; if (precision_fix) { scale = 1.f / 128.f; -#ifdef SD_USE_VULKAN - force_prec_f32 = true; -#endif } // The purpose of the scale here is to prevent NaN issues in certain situations. 
// For example, when using Vulkan without enabling force_prec_f32, @@ -264,6 +263,9 @@ class FeedForward : public GGMLBlock { auto net_0 = std::dynamic_pointer_cast(blocks["net.0"]); auto net_2 = std::dynamic_pointer_cast(blocks["net.2"]); + if (sd_backend_is(ctx->backend, "Vulkan")) { + net_2->set_force_prec_f32(true); + } x = net_0->forward(ctx, x); // [ne3, ne2, ne1, inner_dim] x = net_2->forward(ctx, x); // [ne3, ne2, ne1, dim_out] diff --git a/src/conditioner.hpp b/src/conditioner.hpp index 534a2f11f..6af1d4cae 100644 --- a/src/conditioner.hpp +++ b/src/conditioner.hpp @@ -2,8 +2,11 @@ #define __CONDITIONER_HPP__ #include "clip.hpp" +#include "ggml-alloc.h" +#include "ggml-backend.h" #include "llm.hpp" #include "t5.hpp" +#include "util.h" struct SDCondition { ggml_tensor* c_crossattn = nullptr; // aka context @@ -32,6 +35,7 @@ struct ConditionerParams { }; struct Conditioner { + int model_count = 1; virtual SDCondition get_learned_condition(ggml_context* work_ctx, int n_threads, const ConditionerParams& conditioner_params) = 0; @@ -50,6 +54,11 @@ struct Conditioner { const std::string& prompt) { GGML_ABORT("Not implemented yet!"); } + virtual bool is_cond_stage_model_name_at_index(const std::string& name, int index) { + return true; + } + virtual ggml_backend_t get_params_backend_at_index(int index) = 0; + virtual ggml_backend_t get_runtime_backend_at_index(int index) = 0; }; // ldm.modules.encoders.modules.FrozenCLIPEmbedder @@ -68,7 +77,7 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { std::vector token_embed_custom; std::map> embedding_pos_map; - FrozenCLIPEmbedderWithCustomWords(ggml_backend_t backend, + FrozenCLIPEmbedderWithCustomWords(std::vector backends, bool offload_params_to_cpu, const String2TensorStorage& tensor_storage_map, const std::map& orig_embedding_map, @@ -82,13 +91,28 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { tokenizer.add_special_token(name); } bool force_clip_f32 = !embedding_map.empty(); + + ggml_backend_t clip_backend = backends[0]; + if (sd_version_is_sd1(version)) { - text_model = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, true, force_clip_f32); + LOG_INFO("CLIP-L: using %s backend", ggml_backend_name(clip_backend)); + text_model = std::make_shared(clip_backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, true, force_clip_f32); } else if (sd_version_is_sd2(version)) { - text_model = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.transformer.text_model", OPEN_CLIP_VIT_H_14, true, force_clip_f32); + LOG_INFO("CLIP-H: using %s backend", ggml_backend_name(clip_backend)); + text_model = std::make_shared(clip_backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.transformer.text_model", OPEN_CLIP_VIT_H_14, true, force_clip_f32); } else if (sd_version_is_sdxl(version)) { - text_model = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, false, force_clip_f32); - text_model2 = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.1.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, false, force_clip_f32); + model_count = 2; + ggml_backend_t clip_g_backend = clip_backend; + if (backends.size() >= 2) { + clip_g_backend = backends[1]; + if (backends.size() > 2) { + LOG_WARN("More than 2 clip backends 
provided, but the model only supports 2 text encoders. Ignoring the rest."); + } + } + LOG_INFO("CLIP-L: using %s backend", ggml_backend_name(clip_backend)); + LOG_INFO("CLIP-G: using %s backend", ggml_backend_name(clip_g_backend)); + text_model = std::make_shared(clip_backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, false, force_clip_f32); + text_model2 = std::make_shared(clip_g_backend, offload_params_to_cpu, tensor_storage_map, "cond_stage_model.1.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, false, force_clip_f32); } } @@ -648,6 +672,41 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner { conditioner_params.adm_in_channels, conditioner_params.zero_out_masked); } + + bool is_cond_stage_model_name_at_index(const std::string& name, int index) override { + if (sd_version_is_sdxl(version)) { + if (index == 0) { + return contains(name, "cond_stage_model.model.transformer"); + } else if (index == 1) { + return contains(name, "cond_stage_model.model.1"); + } else { + return false; + } + } + return true; + } + + ggml_backend_t get_params_backend_at_index(int index) { + if (sd_version_is_sdxl(version) && index == 1) { + if (text_model2) { + return text_model2->get_params_backend(); + } + } else if (text_model) { + return text_model->get_params_backend(); + } + return nullptr; + } + + ggml_backend_t get_runtime_backend_at_index(int index) { + if (sd_version_is_sdxl(version) && index == 1) { + if (text_model2) { + return text_model2->get_runtime_backend(); + } + } else if (text_model) { + return text_model->get_runtime_backend(); + } + return nullptr; + } }; struct FrozenCLIPVisionEmbedder : public GGMLRunner { @@ -715,13 +774,31 @@ struct SD3CLIPEmbedder : public Conditioner { std::shared_ptr clip_g; std::shared_ptr t5; - SD3CLIPEmbedder(ggml_backend_t backend, + SD3CLIPEmbedder(std::vector backends, bool offload_params_to_cpu, const String2TensorStorage& tensor_storage_map = {}) : clip_g_tokenizer(0) { bool use_clip_l = false; bool use_clip_g = false; bool use_t5 = false; + + model_count = 3; + + ggml_backend_t clip_l_backend, clip_g_backend, t5_backend; + if (backends.size() == 1) { + clip_l_backend = clip_g_backend = t5_backend = backends[0]; + } else if (backends.size() == 2) { + clip_l_backend = clip_g_backend = backends[0]; + t5_backend = backends[1]; + } else if (backends.size() >= 3) { + clip_l_backend = backends[0]; + clip_g_backend = backends[1]; + t5_backend = backends[2]; + if (backends.size() > 3) { + LOG_WARN("More than 3 clip backends provided, but the model only supports 3 text encoders. 
Ignoring the rest."); + } + } + for (auto pair : tensor_storage_map) { if (pair.first.find("text_encoders.clip_l") != std::string::npos) { use_clip_l = true; @@ -736,13 +813,16 @@ struct SD3CLIPEmbedder : public Conditioner { return; } if (use_clip_l) { - clip_l = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, false); + LOG_INFO("CLIP-L: using %s backend", ggml_backend_name(clip_l_backend)); + clip_l = std::make_shared(clip_l_backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, false); } if (use_clip_g) { - clip_g = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.clip_g.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, false); + LOG_INFO("CLIP-G: using %s backend", ggml_backend_name(clip_g_backend)); + clip_g = std::make_shared(clip_g_backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.clip_g.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, false); } if (use_t5) { - t5 = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.t5xxl.transformer"); + LOG_INFO("T5-XXL: using %s backend", ggml_backend_name(t5_backend)); + t5 = std::make_shared(t5_backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.t5xxl.transformer"); } } @@ -1139,6 +1219,42 @@ struct SD3CLIPEmbedder : public Conditioner { conditioner_params.clip_skip, conditioner_params.zero_out_masked); } + + bool is_cond_stage_model_name_at_index(const std::string& name, int index) override { + if (index == 0) { + return contains(name, "text_encoders.clip_l"); + } else if (index == 1) { + return contains(name, "text_encoders.clip_g"); + } else if (index == 2) { + return contains(name, "text_encoders.t5xxl"); + } else { + return false; + } + } + + ggml_backend_t get_params_backend_at_index(int index) { + if (index == 0 && clip_l) { + return clip_l->get_params_backend(); + } else if (index == 1 && clip_g) { + return clip_g->get_params_backend(); + } else if (index == 2 && t5) { + return t5->get_params_backend(); + } else { + return nullptr; + } + } + + ggml_backend_t get_runtime_backend_at_index(int index) { + if (index == 0 && clip_l) { + return clip_l->get_runtime_backend(); + } else if (index == 1 && clip_g) { + return clip_g->get_runtime_backend(); + } else if (index == 2 && t5) { + return t5->get_runtime_backend(); + } else { + return nullptr; + } + } }; struct FluxCLIPEmbedder : public Conditioner { @@ -1148,11 +1264,25 @@ struct FluxCLIPEmbedder : public Conditioner { std::shared_ptr t5; size_t chunk_len = 256; - FluxCLIPEmbedder(ggml_backend_t backend, + FluxCLIPEmbedder(std::vector backends, bool offload_params_to_cpu, const String2TensorStorage& tensor_storage_map = {}) { bool use_clip_l = false; bool use_t5 = false; + + model_count = 2; + + ggml_backend_t clip_l_backend, t5_backend; + if (backends.size() == 1) { + clip_l_backend = t5_backend = backends[0]; + } else if (backends.size() >= 2) { + clip_l_backend = backends[0]; + t5_backend = backends[1]; + if (backends.size() > 2) { + LOG_WARN("More than 2 clip backends provided, but the model only supports 2 text encoders. 
Ignoring the rest."); + } + } + for (auto pair : tensor_storage_map) { if (pair.first.find("text_encoders.clip_l") != std::string::npos) { use_clip_l = true; @@ -1167,12 +1297,14 @@ struct FluxCLIPEmbedder : public Conditioner { } if (use_clip_l) { - clip_l = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, true); + LOG_INFO("CLIP-L: using %s backend", ggml_backend_name(clip_l_backend)); + clip_l = std::make_shared(clip_l_backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, true); } else { LOG_WARN("clip_l text encoder not found! Prompt adherence might be degraded."); } if (use_t5) { - t5 = std::make_shared(backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.t5xxl.transformer"); + LOG_INFO("T5-XXL: using %s backend", ggml_backend_name(t5_backend)); + t5 = std::make_shared(t5_backend, offload_params_to_cpu, tensor_storage_map, "text_encoders.t5xxl.transformer"); } else { LOG_WARN("t5xxl text encoder not found! Prompt adherence might be degraded."); } @@ -1416,6 +1548,36 @@ struct FluxCLIPEmbedder : public Conditioner { conditioner_params.clip_skip, conditioner_params.zero_out_masked); } + + bool is_cond_stage_model_name_at_index(const std::string& name, int index) override { + if (index == 0) { + return contains(name, "text_encoders.clip_l"); + } else if (index == 1) { + return contains(name, "text_encoders.t5xxl"); + } else { + return false; + } + } + + ggml_backend_t get_params_backend_at_index(int index) { + if (index == 0 && clip_l) { + return clip_l->get_params_backend(); + } else if (index == 1 && t5) { + return t5->get_params_backend(); + } else { + return nullptr; + } + } + + ggml_backend_t get_runtime_backend_at_index(int index) { + if (index == 0 && clip_l) { + return clip_l->get_runtime_backend(); + } else if (index == 1 && t5) { + return t5->get_runtime_backend(); + } else { + return nullptr; + } + } }; struct T5CLIPEmbedder : public Conditioner { @@ -1639,6 +1801,20 @@ struct T5CLIPEmbedder : public Conditioner { conditioner_params.clip_skip, conditioner_params.zero_out_masked); } + + ggml_backend_t get_params_backend_at_index(int index) { + if (t5) { + return t5->get_params_backend(); + } + return nullptr; + } + + ggml_backend_t get_runtime_backend_at_index(int index) { + if (t5) { + return t5->get_runtime_backend(); + } + return nullptr; + } }; struct AnimaConditioner : public Conditioner { @@ -1651,11 +1827,11 @@ struct AnimaConditioner : public Conditioner { const String2TensorStorage& tensor_storage_map = {}) { qwen_tokenizer = std::make_shared(); llm = std::make_shared(LLM::LLMArch::QWEN3, - backend, - offload_params_to_cpu, - tensor_storage_map, - "text_encoders.llm", - false); + backend, + offload_params_to_cpu, + tensor_storage_map, + "text_encoders.llm", + false); } void get_param_tensors(std::map& tensors) override { @@ -1775,6 +1951,20 @@ struct AnimaConditioner : public Conditioner { return {hidden_states, t5_weight_tensor, t5_ids_tensor}; } + + ggml_backend_t get_params_backend_at_index(int index) { + if (llm) { + return llm->get_params_backend(); + } + return nullptr; + } + + ggml_backend_t get_runtime_backend_at_index(int index) { + if (llm) { + return llm->get_runtime_backend(); + } + return nullptr; + } }; struct LLMEmbedder : public Conditioner { @@ -2149,6 +2339,20 @@ struct LLMEmbedder : public Conditioner { LOG_DEBUG("computing condition graph completed, taking %" PRId64 " ms", t1 - t0); 
return {hidden_states, nullptr, nullptr, extra_hidden_states_vec}; } + + ggml_backend_t get_params_backend_at_index(int index) { + if (llm) { + return llm->get_params_backend(); + } + return nullptr; + } + + ggml_backend_t get_runtime_backend_at_index(int index) { + if (llm) { + return llm->get_runtime_backend(); + } + return nullptr; + } }; #endif diff --git a/src/diffusion_model.hpp b/src/diffusion_model.hpp index 07d9df898..3068628ac 100644 --- a/src/diffusion_model.hpp +++ b/src/diffusion_model.hpp @@ -37,7 +37,7 @@ struct DiffusionModel { virtual void free_compute_buffer() = 0; virtual void get_param_tensors(std::map& tensors) = 0; virtual size_t get_params_buffer_size() = 0; - virtual void set_weight_adapter(const std::shared_ptr& adapter){}; + virtual void set_weight_adapter(const std::shared_ptr& adapter) {}; virtual int64_t get_adm_in_channels() = 0; virtual void set_flash_attention_enabled(bool enabled) = 0; virtual void set_circular_axes(bool circular_x, bool circular_y) = 0; diff --git a/src/ggml_extend.hpp b/src/ggml_extend.hpp index e6b27cc7c..63bc7dbbb 100644 --- a/src/ggml_extend.hpp +++ b/src/ggml_extend.hpp @@ -28,26 +28,6 @@ #include "model.h" -#ifdef SD_USE_CUDA -#include "ggml-cuda.h" -#endif - -#ifdef SD_USE_METAL -#include "ggml-metal.h" -#endif - -#ifdef SD_USE_VULKAN -#include "ggml-vulkan.h" -#endif - -#ifdef SD_USE_OPENCL -#include "ggml-opencl.h" -#endif - -#ifdef SD_USE_SYCL -#include "ggml-sycl.h" -#endif - #include "rng.hpp" #include "util.h" @@ -88,6 +68,42 @@ __STATIC_INLINE__ void ggml_log_callback_default(ggml_log_level level, const cha } } +__STATIC_INLINE__ bool backend_name_exists(std::string name) { + const int device_count = ggml_backend_dev_count(); + for (int i = 0; i < device_count; i++) { + if (name == ggml_backend_dev_name(ggml_backend_dev_get(i))) { + return true; + } + } + return false; +} + +__STATIC_INLINE__ std::string sanitize_backend_name(std::string name) { + if (name == "" || backend_name_exists(name)) { + return name; + } else { + LOG_WARN("Backend %s not found, using default backend", name.c_str()); + return ""; + } +} + +__STATIC_INLINE__ std::string get_default_backend_name() { + // should pick the same backend as ggml_backend_init_best + ggml_backend_dev_t dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_GPU); + dev = dev ? dev : ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_IGPU); + dev = dev ? 
dev : ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); + return ggml_backend_dev_name(dev); +} + +__STATIC_INLINE__ ggml_backend_t init_named_backend(std::string name = "") { + LOG_DEBUG("Initializing backend: %s", name.c_str()); + if (name.empty()) { + return ggml_backend_init_best(); + } else { + return ggml_backend_init_by_name(name.c_str(), nullptr); + } +} + static_assert(GGML_MAX_NAME >= 128, "GGML_MAX_NAME must be at least 128"); // n-mode tensor-matrix product @@ -1283,25 +1299,25 @@ __STATIC_INLINE__ ggml_tensor* ggml_ext_ones_like(ggml_context* ctx, return ggml_ext_ones(ctx, x->ne[0], x->ne[1], x->ne[2], x->ne[3]); } -__STATIC_INLINE__ ggml_tensor* ggml_ext_cast_f32(ggml_context* ctx, ggml_tensor* a) { -#ifdef SD_USE_VULKAN - auto zero_index = ggml_get_tensor(ctx, "ggml_runner_build_in_tensor:zero_int"); - auto out = ggml_reshape_1d(ctx, a, ggml_nelements(a)); - out = ggml_get_rows(ctx, out, zero_index); - out = ggml_reshape(ctx, out, a); - // auto out = ggml_cast(ctx, a, GGML_TYPE_F32); - return out; -#else - auto out = ggml_reshape_2d(ctx, a, 1, ggml_nelements(a)); - ggml_tensor* one = ggml_ext_ones(ctx, 1, 1, 1, 1); // [1,] - if (ggml_is_transposed(out)) { - out = ggml_mul_mat(ctx, one, out); +__STATIC_INLINE__ ggml_tensor* ggml_ext_cast_f32(ggml_context* ctx, ggml_backend_t backend, ggml_tensor* a) { + if (sd_backend_is(backend, "Vulkan")) { + auto zero_index = ggml_get_tensor(ctx, "ggml_runner_build_in_tensor:zero_int"); + auto out = ggml_reshape_1d(ctx, a, ggml_nelements(a)); + out = ggml_get_rows(ctx, out, zero_index); + out = ggml_reshape(ctx, out, a); + // auto out = ggml_cast(ctx, a, GGML_TYPE_F32); + return out; } else { - out = ggml_mul_mat(ctx, out, one); + auto out = ggml_reshape_2d(ctx, a, 1, ggml_nelements(a)); + ggml_tensor* one = ggml_ext_ones(ctx, 1, 1, 1, 1); // [1,] + if (ggml_is_transposed(out)) { + out = ggml_mul_mat(ctx, one, out); + } else { + out = ggml_mul_mat(ctx, out, one); + } + out = ggml_reshape(ctx, out, a); + return out; } - out = ggml_reshape(ctx, out, a); -#endif - return out; } // q: [N, L_q, C(n_head*d_head)] or [N*n_head, L_q, d_head] @@ -1493,16 +1509,14 @@ __STATIC_INLINE__ ggml_tensor* ggml_ext_group_norm(ggml_context* ctx, } __STATIC_INLINE__ void ggml_ext_backend_tensor_get_and_sync(ggml_backend_t backend, const ggml_tensor* tensor, void* data, size_t offset, size_t size) { -#if defined(SD_USE_CUDA) || defined(SD_USE_SYCL) - if (!ggml_backend_is_cpu(backend)) { + if ((sd_backend_is(backend, "ROCm") || sd_backend_is(backend, "CUDA") || sd_backend_is(backend, "SYCL")) && + !ggml_backend_is_cpu(backend)) { ggml_backend_tensor_get_async(backend, tensor, data, offset, size); ggml_backend_synchronize(backend); - } else { - ggml_backend_tensor_get(tensor, data, offset, size); + return; } -#else + ggml_backend_tensor_get(tensor, data, offset, size); -#endif } __STATIC_INLINE__ float ggml_ext_backend_tensor_get_f32(ggml_tensor* tensor) { @@ -1649,14 +1663,15 @@ struct WeightAdapter { float scale = 1.f; } conv2d; }; - virtual ggml_tensor* patch_weight(ggml_context* ctx, ggml_tensor* weight, const std::string& weight_name) = 0; + virtual ggml_tensor* patch_weight(ggml_context* ctx, ggml_backend_t backend, ggml_tensor* weight, const std::string& weight_name) = 0; virtual ggml_tensor* forward_with_lora(ggml_context* ctx, + ggml_backend_t backend, ggml_tensor* x, ggml_tensor* w, ggml_tensor* b, const std::string& prefix, - ForwardParams forward_params) = 0; - virtual size_t get_extra_graph_size() = 0; + ForwardParams forward_params) = 0; + 
virtual size_t get_extra_graph_size() = 0; }; struct GGMLRunnerContext { @@ -2135,6 +2150,14 @@ struct GGMLRunner { void set_weight_adapter(const std::shared_ptr& adapter) { weight_adapter = adapter; } + + ggml_backend_t get_runtime_backend() { + return runtime_backend; + } + + ggml_backend_t get_params_backend() { + return params_backend; + } }; class GGMLBlock { @@ -2279,6 +2302,14 @@ class Linear : public UnaryBlock { force_prec_f32(force_prec_f32), scale(scale) {} + void set_scale(float scale_) { + scale = scale_; + } + + void set_force_prec_f32(bool force_prec_f32_) { + force_prec_f32 = force_prec_f32_; + } + ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) { ggml_tensor* w = params["weight"]; ggml_tensor* b = nullptr; @@ -2290,7 +2321,7 @@ class Linear : public UnaryBlock { forward_params.op_type = WeightAdapter::ForwardParams::op_type_t::OP_LINEAR; forward_params.linear.force_prec_f32 = force_prec_f32; forward_params.linear.scale = scale; - return ctx->weight_adapter->forward_with_lora(ctx->ggml_ctx, x, w, b, prefix, forward_params); + return ctx->weight_adapter->forward_with_lora(ctx->ggml_ctx, ctx->backend, x, w, b, prefix, forward_params); } return ggml_ext_linear(ctx->ggml_ctx, x, w, b, force_prec_f32, scale); } @@ -2406,7 +2437,7 @@ class Conv2d : public UnaryBlock { forward_params.conv2d.circular_x = ctx->circular_x_enabled; forward_params.conv2d.circular_y = ctx->circular_y_enabled; forward_params.conv2d.scale = scale; - return ctx->weight_adapter->forward_with_lora(ctx->ggml_ctx, x, w, b, prefix, forward_params); + return ctx->weight_adapter->forward_with_lora(ctx->ggml_ctx, ctx->backend, x, w, b, prefix, forward_params); } return ggml_ext_conv_2d(ctx->ggml_ctx, x, @@ -2470,7 +2501,7 @@ class Conv3d : public UnaryBlock { ggml_tensor* w = params["weight"]; ggml_tensor* b = nullptr; if (ctx->weight_adapter) { - w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, w, prefix + "weight"); + w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, ctx->backend, w, prefix + "weight"); if (w->type != GGML_TYPE_F16) { w = ggml_cast(ctx->ggml_ctx, w, GGML_TYPE_F16); } @@ -2478,7 +2509,7 @@ class Conv3d : public UnaryBlock { if (bias) { b = params["bias"]; if (ctx->weight_adapter) { - b = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, b, prefix + "bias"); + b = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, ctx->backend, b, prefix + "bias"); } } return ggml_ext_conv_3d(ctx->ggml_ctx, x, w, b, in_channels, @@ -2525,12 +2556,12 @@ class LayerNorm : public UnaryBlock { if (elementwise_affine) { w = params["weight"]; if (ctx->weight_adapter) { - w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, w, prefix + "weight"); + w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, ctx->backend, w, prefix + "weight"); } if (bias) { b = params["bias"]; if (ctx->weight_adapter) { - b = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, b, prefix + "bias"); + b = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, ctx->backend, b, prefix + "bias"); } } } @@ -2573,8 +2604,8 @@ class GroupNorm : public GGMLBlock { w = params["weight"]; b = params["bias"]; if (ctx->weight_adapter) { - w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, w, prefix + "weight"); - b = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, b, prefix + "bias"); + w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, ctx->backend, w, prefix + "weight"); + b = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, ctx->backend, b, prefix + "bias"); } } return ggml_ext_group_norm(ctx->ggml_ctx, x, w, b, num_groups); @@ -2608,7 
+2639,7 @@ class RMSNorm : public UnaryBlock { ggml_tensor* forward(GGMLRunnerContext* ctx, ggml_tensor* x) { ggml_tensor* w = params["weight"]; if (ctx->weight_adapter) { - w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, w, prefix + "weight"); + w = ctx->weight_adapter->patch_weight(ctx->ggml_ctx, ctx->backend, w, prefix + "weight"); } x = ggml_rms_norm(ctx->ggml_ctx, x, eps); x = ggml_mul_inplace(ctx->ggml_ctx, x, w); diff --git a/src/lora.hpp b/src/lora.hpp index 7df04ea27..2ddca167f 100644 --- a/src/lora.hpp +++ b/src/lora.hpp @@ -129,7 +129,7 @@ struct LoraModel : public GGMLRunner { } } - ggml_tensor* get_lora_weight_diff(const std::string& model_tensor_name, ggml_context* ctx) { + ggml_tensor* get_lora_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_backend_t backend) { ggml_tensor* updown = nullptr; int index = 0; while (true) { @@ -152,17 +152,17 @@ struct LoraModel : public GGMLRunner { auto iter = lora_tensors.find(lora_up_name); if (iter != lora_tensors.end()) { - lora_up = ggml_ext_cast_f32(ctx, iter->second); + lora_up = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(lora_mid_name); if (iter != lora_tensors.end()) { - lora_mid = ggml_ext_cast_f32(ctx, iter->second); + lora_mid = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(lora_down_name); if (iter != lora_tensors.end()) { - lora_down = ggml_ext_cast_f32(ctx, iter->second); + lora_down = ggml_ext_cast_f32(ctx, backend, iter->second); } if (lora_up == nullptr || lora_down == nullptr) { @@ -208,7 +208,7 @@ struct LoraModel : public GGMLRunner { return updown; } - ggml_tensor* get_raw_weight_diff(const std::string& model_tensor_name, ggml_context* ctx) { + ggml_tensor* get_raw_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_backend_t backend) { ggml_tensor* updown = nullptr; int index = 0; while (true) { @@ -225,7 +225,7 @@ struct LoraModel : public GGMLRunner { auto iter = lora_tensors.find(diff_name); if (iter != lora_tensors.end()) { - curr_updown = ggml_ext_cast_f32(ctx, iter->second); + curr_updown = ggml_ext_cast_f32(ctx, backend, iter->second); } else { break; } @@ -248,7 +248,7 @@ struct LoraModel : public GGMLRunner { return updown; } - ggml_tensor* get_loha_weight_diff(const std::string& model_tensor_name, ggml_context* ctx) { + ggml_tensor* get_loha_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_backend_t backend) { ggml_tensor* updown = nullptr; int index = 0; while (true) { @@ -276,33 +276,33 @@ struct LoraModel : public GGMLRunner { auto iter = lora_tensors.find(hada_1_down_name); if (iter != lora_tensors.end()) { - hada_1_down = ggml_ext_cast_f32(ctx, iter->second); + hada_1_down = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(hada_1_up_name); if (iter != lora_tensors.end()) { - hada_1_up = ggml_ext_cast_f32(ctx, iter->second); + hada_1_up = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(hada_1_mid_name); if (iter != lora_tensors.end()) { - hada_1_mid = ggml_ext_cast_f32(ctx, iter->second); + hada_1_mid = ggml_ext_cast_f32(ctx, backend, iter->second); hada_1_up = ggml_cont(ctx, ggml_transpose(ctx, hada_1_up)); } iter = lora_tensors.find(hada_2_down_name); if (iter != lora_tensors.end()) { - hada_2_down = ggml_ext_cast_f32(ctx, iter->second); + hada_2_down = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(hada_2_up_name); if (iter != lora_tensors.end()) { - hada_2_up = ggml_ext_cast_f32(ctx, 
iter->second); + hada_2_up = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(hada_2_mid_name); if (iter != lora_tensors.end()) { - hada_2_mid = ggml_ext_cast_f32(ctx, iter->second); + hada_2_mid = ggml_ext_cast_f32(ctx, backend, iter->second); hada_2_up = ggml_cont(ctx, ggml_transpose(ctx, hada_2_up)); } @@ -351,7 +351,7 @@ struct LoraModel : public GGMLRunner { return updown; } - ggml_tensor* get_lokr_weight_diff(const std::string& model_tensor_name, ggml_context* ctx) { + ggml_tensor* get_lokr_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_backend_t backend) { ggml_tensor* updown = nullptr; int index = 0; while (true) { @@ -378,24 +378,24 @@ struct LoraModel : public GGMLRunner { auto iter = lora_tensors.find(lokr_w1_name); if (iter != lora_tensors.end()) { - lokr_w1 = ggml_ext_cast_f32(ctx, iter->second); + lokr_w1 = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(lokr_w2_name); if (iter != lora_tensors.end()) { - lokr_w2 = ggml_ext_cast_f32(ctx, iter->second); + lokr_w2 = ggml_ext_cast_f32(ctx, backend, iter->second); } int64_t rank = 1; if (lokr_w1 == nullptr) { iter = lora_tensors.find(lokr_w1_a_name); if (iter != lora_tensors.end()) { - lokr_w1_a = ggml_ext_cast_f32(ctx, iter->second); + lokr_w1_a = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(lokr_w1_b_name); if (iter != lora_tensors.end()) { - lokr_w1_b = ggml_ext_cast_f32(ctx, iter->second); + lokr_w1_b = ggml_ext_cast_f32(ctx, backend, iter->second); } if (lokr_w1_a == nullptr || lokr_w1_b == nullptr) { @@ -410,12 +410,12 @@ struct LoraModel : public GGMLRunner { if (lokr_w2 == nullptr) { iter = lora_tensors.find(lokr_w2_a_name); if (iter != lora_tensors.end()) { - lokr_w2_a = ggml_ext_cast_f32(ctx, iter->second); + lokr_w2_a = ggml_ext_cast_f32(ctx, backend, iter->second); } iter = lora_tensors.find(lokr_w2_b_name); if (iter != lora_tensors.end()) { - lokr_w2_b = ggml_ext_cast_f32(ctx, iter->second); + lokr_w2_b = ggml_ext_cast_f32(ctx, backend, iter->second); } if (lokr_w2_a == nullptr || lokr_w2_b == nullptr) { @@ -468,23 +468,23 @@ struct LoraModel : public GGMLRunner { return updown; } - ggml_tensor* get_weight_diff(const std::string& model_tensor_name, ggml_context* ctx, ggml_tensor* model_tensor, bool with_lora_and_lokr = true) { + ggml_tensor* get_weight_diff(const std::string& model_tensor_name, ggml_backend_t backend, ggml_context* ctx, ggml_tensor* model_tensor, bool with_lora_and_lokr = true) { // lora ggml_tensor* diff = nullptr; if (with_lora_and_lokr) { - diff = get_lora_weight_diff(model_tensor_name, ctx); + diff = get_lora_weight_diff(model_tensor_name, ctx, backend); } // diff if (diff == nullptr) { - diff = get_raw_weight_diff(model_tensor_name, ctx); + diff = get_raw_weight_diff(model_tensor_name, ctx, backend); } // loha if (diff == nullptr) { - diff = get_loha_weight_diff(model_tensor_name, ctx); + diff = get_loha_weight_diff(model_tensor_name, ctx, backend); } // lokr if (diff == nullptr && with_lora_and_lokr) { - diff = get_lokr_weight_diff(model_tensor_name, ctx); + diff = get_lokr_weight_diff(model_tensor_name, ctx, backend); } if (diff != nullptr) { if (ggml_nelements(diff) < ggml_nelements(model_tensor)) { @@ -761,7 +761,7 @@ struct LoraModel : public GGMLRunner { ggml_tensor* model_tensor = it.second; // lora - ggml_tensor* diff = get_weight_diff(model_tensor_name, compute_ctx, model_tensor); + ggml_tensor* diff = get_weight_diff(model_tensor_name, runtime_backend, compute_ctx, model_tensor); 
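+ // NOTE (assumption based on the ggml_extend.hpp change above): the runtime backend is threaded through here so that ggml_ext_cast_f32 can choose the Vulkan-safe cast path at runtime, replacing the old compile-time SD_USE_VULKAN branch.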
if (diff == nullptr) { continue; } @@ -774,7 +774,7 @@ struct LoraModel : public GGMLRunner { ggml_tensor* final_tensor; if (model_tensor->type != GGML_TYPE_F32 && model_tensor->type != GGML_TYPE_F16) { - final_tensor = ggml_ext_cast_f32(compute_ctx, model_tensor); + final_tensor = ggml_ext_cast_f32(compute_ctx, runtime_backend, model_tensor); final_tensor = ggml_add_inplace(compute_ctx, final_tensor, diff); final_tensor = ggml_cpy(compute_ctx, final_tensor, model_tensor); } else { @@ -841,34 +841,35 @@ struct MultiLoraAdapter : public WeightAdapter { : lora_models(lora_models) { } - ggml_tensor* patch_weight(ggml_context* ctx, ggml_tensor* weight, const std::string& weight_name, bool with_lora_and_lokr) { + ggml_tensor* patch_weight(ggml_context* ctx, ggml_backend_t backend, ggml_tensor* weight, const std::string& weight_name, bool with_lora_and_lokr) { for (auto& lora_model : lora_models) { - ggml_tensor* diff = lora_model->get_weight_diff(weight_name, ctx, weight, with_lora_and_lokr); + ggml_tensor* diff = lora_model->get_weight_diff(weight_name, backend, ctx, weight, with_lora_and_lokr); if (diff == nullptr) { continue; } if (weight->type != GGML_TYPE_F32 && weight->type != GGML_TYPE_F16) { - weight = ggml_ext_cast_f32(ctx, weight); + weight = ggml_ext_cast_f32(ctx, backend, weight); } weight = ggml_add(ctx, weight, diff); } return weight; } - ggml_tensor* patch_weight(ggml_context* ctx, ggml_tensor* weight, const std::string& weight_name) override { - return patch_weight(ctx, weight, weight_name, true); + ggml_tensor* patch_weight(ggml_context* ctx, ggml_backend_t backend, ggml_tensor* weight, const std::string& weight_name) override { + return patch_weight(ctx, backend, weight, weight_name, true); } ggml_tensor* forward_with_lora(ggml_context* ctx, + ggml_backend_t backend, ggml_tensor* x, ggml_tensor* w, ggml_tensor* b, const std::string& prefix, WeightAdapter::ForwardParams forward_params) override { - w = patch_weight(ctx, w, prefix + "weight", false); + w = patch_weight(ctx, backend, w, prefix + "weight", false); if (b) { - b = patch_weight(ctx, b, prefix + "bias", false); + b = patch_weight(ctx, backend, b, prefix + "bias", false); } ggml_tensor* out; if (forward_params.op_type == ForwardParams::op_type_t::OP_LINEAR) { diff --git a/src/model.cpp b/src/model.cpp index d23b97fac..b7f9ea0c5 100644 --- a/src/model.cpp +++ b/src/model.cpp @@ -25,18 +25,6 @@ #include "name_conversion.h" #include "stable-diffusion.h" -#ifdef SD_USE_METAL -#include "ggml-metal.h" -#endif - -#ifdef SD_USE_VULKAN -#include "ggml-vulkan.h" -#endif - -#ifdef SD_USE_OPENCL -#include "ggml-opencl.h" -#endif - #define ST_HEADER_SIZE_LEN 8 uint64_t read_u64(uint8_t* buffer) { diff --git a/src/qwen_image.hpp b/src/qwen_image.hpp index 68af0e8e8..ebc10ec52 100644 --- a/src/qwen_image.hpp +++ b/src/qwen_image.hpp @@ -95,9 +95,7 @@ namespace Qwen { float scale = 1.f / 32.f; bool force_prec_f32 = false; -#ifdef SD_USE_VULKAN - force_prec_f32 = true; -#endif + // The purpose of the scale here is to prevent NaN issues in certain situations. // For example when using CUDA but the weights are k-quants (not all prompts). 
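+ // The Vulkan f32-precision workaround is now applied at runtime instead of via the removed SD_USE_VULKAN #ifdef: the forward pass below checks sd_backend_is(ctx->backend, "Vulkan") and calls set_force_prec_f32(true) on this block.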
blocks["to_out.0"] = std::shared_ptr(new Linear(inner_dim, out_dim, out_bias, false, force_prec_f32, scale)); @@ -124,6 +122,10 @@ namespace Qwen { auto to_v = std::dynamic_pointer_cast(blocks["to_v"]); auto to_out_0 = std::dynamic_pointer_cast(blocks["to_out.0"]); + if (sd_backend_is(ctx->backend, "Vulkan")) { + to_out_0->set_force_prec_f32(true); + } + auto norm_added_q = std::dynamic_pointer_cast(blocks["norm_added_q"]); auto norm_added_k = std::dynamic_pointer_cast(blocks["norm_added_k"]); diff --git a/src/stable-diffusion.cpp b/src/stable-diffusion.cpp index bbf2f979d..bf2453a73 100644 --- a/src/stable-diffusion.cpp +++ b/src/stable-diffusion.cpp @@ -1,5 +1,7 @@ +#include "ggml-cpu.h" #include "ggml_extend.hpp" +#include #include "model.h" #include "rng.hpp" #include "rng_mt19937.hpp" @@ -25,6 +27,10 @@ #include "latent-preview.h" #include "name_conversion.h" +#if GGML_RPC +#include "ggml-rpc.h" +#endif + const char* model_version_to_str[] = { "SD 1.x", "SD 1.x Inpaint", @@ -476,14 +482,131 @@ static void log_sample_cache_summary(const SampleCacheRuntime& runtime, size_t t } } +std::vector string_split(const std::string& input, char separator) { + std::vector parts; + size_t begin_pos = 0; + size_t separator_pos = input.find(separator); + while (separator_pos != std::string::npos) { + std::string part = input.substr(begin_pos, separator_pos - begin_pos); + parts.emplace_back(part); + begin_pos = separator_pos + 1; + separator_pos = input.find(separator, begin_pos); + } + parts.emplace_back(input.substr(begin_pos, separator_pos - begin_pos)); + return parts; +} + +static void add_rpc_devices(const std::string& servers) { + auto rpc_servers = string_split(servers, ','); + if (rpc_servers.empty()) { + LOG_ERROR("no RPC servers specified"); + return; + } + ggml_backend_reg_t rpc_reg = ggml_backend_reg_by_name("RPC"); + if (!rpc_reg) { + LOG_ERROR("failed to find RPC backend"); + return; + } + typedef ggml_backend_reg_t (*ggml_backend_rpc_add_server_t)(const char* endpoint); + ggml_backend_rpc_add_server_t ggml_backend_rpc_add_server_fn = (ggml_backend_rpc_add_server_t)ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_server"); + if (!ggml_backend_rpc_add_server_fn) { + LOG_ERROR("failed to find RPC add server function"); + return; + } + for (const auto& server : rpc_servers) { + auto reg = ggml_backend_rpc_add_server_fn(server.c_str()); + ggml_backend_register(reg); + } +} + +void add_rpc_device(const char* servers_cstr) { + std::string servers(servers_cstr); + add_rpc_devices(servers); +} + +std::vector sanitize_backend_name_list(std::string name) { + std::vector vec = {}; + if (name == "" || backend_name_exists(name)) { + // single backend + vec.push_back(name); + } else if (name.find(",") != std::string::npos) { + // comma-separated backend names + std::stringstream ss(name); + std::string token; + while (std::getline(ss, token, ',')) { + if (token == "" || backend_name_exists(token)) { + vec.push_back(token); + } else { + LOG_WARN("backend name %s not found, using default", token.c_str()); + vec.push_back(""); + } + } + } else { + vec.push_back(""); + } + return vec; +} + +std::vector> list_backends_vector() { + std::vector> backends; + const int device_count = ggml_backend_dev_count(); + for (int i = 0; i < device_count; i++) { + auto dev = ggml_backend_dev_get(i); + backends.push_back({ggml_backend_dev_name(dev), ggml_backend_dev_description(dev)}); + } + return backends; +} + +SD_API size_t backend_list_size() { + // for C API + size_t buffer_size = 0; + auto 
backends = list_backends_vector(); + for (auto& backend : backends) { + auto dev_name_size = backend.first.size(); + auto dev_desc_size = backend.second.size(); + buffer_size += dev_name_size + dev_desc_size + 2; // +2 for the separators + } + return buffer_size; +} + +// devices are separated by \n and name and description are separated by \t +SD_API void list_backends_to_buffer(char* buffer, size_t buffer_size) { + auto backends = list_backends_vector(); + size_t offset = 0; + for (auto& backend : backends) { + size_t name_size = backend.first.size(); + size_t desc_size = backend.second.size(); + if (offset + name_size + desc_size + 2 > buffer_size) { + break; // Not enough space in the buffer + } + memcpy(buffer + offset, backend.first.c_str(), name_size); + offset += name_size; + buffer[offset++] = '\t'; + memcpy(buffer + offset, backend.second.c_str(), desc_size); + offset += desc_size; + buffer[offset++] = '\n'; + } + if (offset < buffer_size) { + buffer[offset] = '\0'; // Ensure the buffer is null-terminated at the end + } else { + LOG_WARN("Provided buffer size is too small to contain details of all devices."); + buffer[buffer_size - 1] = '\0'; // Ensure the buffer is null-terminated at the end + } +} + /*=============================================== StableDiffusionGGML ================================================*/ class StableDiffusionGGML { public: ggml_backend_t backend = nullptr; // general backend - ggml_backend_t clip_backend = nullptr; + ggml_backend_t diffusion_backend = nullptr; ggml_backend_t control_net_backend = nullptr; ggml_backend_t vae_backend = nullptr; + ggml_backend_t tae_backend = nullptr; + ggml_backend_t pmid_backend = nullptr; + ggml_backend_t vision_backend = nullptr; + + std::vector clip_backends = {nullptr}; SDVersion version; bool vae_decode_only = false; @@ -531,72 +654,33 @@ class StableDiffusionGGML { StableDiffusionGGML() = default; ~StableDiffusionGGML() { - if (clip_backend != backend) { - ggml_backend_free(clip_backend); + if (diffusion_backend && diffusion_backend != backend) { + ggml_backend_free(diffusion_backend); + } + for (auto clip_backend : clip_backends) { + if (clip_backend && clip_backend != backend) { + ggml_backend_free(clip_backend); + } } - if (control_net_backend != backend) { + if (control_net_backend && control_net_backend != backend) { ggml_backend_free(control_net_backend); } - if (vae_backend != backend) { - ggml_backend_free(vae_backend); + if (tae_backend && tae_backend != vae_backend) { + ggml_backend_free(tae_backend); } - ggml_backend_free(backend); - } - - void init_backend() { -#ifdef SD_USE_CUDA - LOG_DEBUG("Using CUDA backend"); - backend = ggml_backend_cuda_init(0); -#endif -#ifdef SD_USE_METAL - LOG_DEBUG("Using Metal backend"); - backend = ggml_backend_metal_init(); -#endif -#ifdef SD_USE_VULKAN - LOG_DEBUG("Using Vulkan backend"); - size_t device = 0; - const int device_count = ggml_backend_vk_get_device_count(); - if (device_count) { - const char* SD_VK_DEVICE = getenv("SD_VK_DEVICE"); - if (SD_VK_DEVICE != nullptr) { - std::string sd_vk_device_str = SD_VK_DEVICE; - try { - device = std::stoull(sd_vk_device_str); - } catch (const std::invalid_argument&) { - LOG_WARN("SD_VK_DEVICE environment variable is not a valid integer (%s). Falling back to device 0.", SD_VK_DEVICE); - device = 0; - } catch (const std::out_of_range&) { - LOG_WARN("SD_VK_DEVICE environment variable value is out of range for `unsigned long long` type (%s). 
Falling back to device 0.", SD_VK_DEVICE); - device = 0; - } - if (device >= device_count) { - LOG_WARN("Cannot find targeted vulkan device (%llu). Falling back to device 0.", device); - device = 0; - } - } - LOG_INFO("Vulkan: Using device %llu", device); - backend = ggml_backend_vk_init(device); - } - if (!backend) { - LOG_WARN("Failed to initialize Vulkan backend"); + if (vae_backend && vae_backend != backend) { + ggml_backend_free(vae_backend); } -#endif -#ifdef SD_USE_OPENCL - LOG_DEBUG("Using OpenCL backend"); - // ggml_log_set(ggml_log_callback_default, nullptr); // Optional ggml logs - backend = ggml_backend_opencl_init(); - if (!backend) { - LOG_WARN("Failed to initialize OpenCL backend"); + if (backend) { + ggml_backend_free(backend); } -#endif -#ifdef SD_USE_SYCL - LOG_DEBUG("Using SYCL backend"); - backend = ggml_backend_sycl_init(0); -#endif + } - if (!backend) { - LOG_DEBUG("Using CPU backend"); - backend = ggml_backend_cpu_init(); + void log_backends() { + const int device_count = ggml_backend_dev_count(); + for (int i = 0; i < device_count; i++) { + auto dev = ggml_backend_dev_get(i); + LOG_INFO("%s (%s)", ggml_backend_dev_name(dev), ggml_backend_dev_description(dev)); } } @@ -627,7 +711,54 @@ class StableDiffusionGGML { ggml_log_set(ggml_log_callback_default, nullptr); - init_backend(); + log_backends(); + + std::string default_backend_name = get_default_backend_name(); + + std::string override_default_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->main_device)); + + if (override_default_backend_name.size() > 0) { + LOG_INFO("Setting default backend to %s", override_default_backend_name.c_str()); + default_backend_name = override_default_backend_name; + } + + std::string diffusion_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->diffusion_device)); + std::vector clip_backend_names = sanitize_backend_name_list(SAFE_STR(sd_ctx_params->clip_device)); + std::string control_net_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->control_net_device)); + std::string vae_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->vae_device)); + std::string tae_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->tae_device)); + std::string pmid_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->photomaker_device)); + std::string vision_backend_name = sanitize_backend_name(SAFE_STR(sd_ctx_params->vision_device)); + + bool diffusion_backend_is_default = diffusion_backend_name.empty() || diffusion_backend_name == default_backend_name; + bool clip_backends_are_default = true; + for (const auto& clip_backend_name : clip_backend_names) { + if (!clip_backend_name.empty() && clip_backend_name != default_backend_name) { + clip_backends_are_default = false; + break; + } + } + bool control_net_backend_is_default = (control_net_backend_name.empty() || control_net_backend_name == default_backend_name); + bool vae_backend_is_default = (vae_backend_name.empty() || vae_backend_name == default_backend_name); + // if tae_backend_name is empty, it will use the same backend as vae + bool tae_backend_is_default = (tae_backend_name.empty() && vae_backend_is_default) || tae_backend_name == default_backend_name; + bool pmid_backend_is_default = (pmid_backend_name.empty() || pmid_backend_name == default_backend_name); + bool vision_backend_is_default = (vision_backend_name.empty() || vision_backend_name == default_backend_name); + + // if some backend is not specified or is the same as the default backend, use the default backend + bool 
use_default_backend = diffusion_backend_is_default || clip_backends_are_default || control_net_backend_is_default || vae_backend_is_default || tae_backend_is_default || pmid_backend_is_default || vision_backend_is_default; + + if (use_default_backend) { + backend = init_named_backend(override_default_backend_name); + LOG_DEBUG("Loaded default backend %s", ggml_backend_name(backend)); + } + + if (!diffusion_backend_is_default) { + diffusion_backend = init_named_backend(diffusion_backend_name); + LOG_INFO("Using diffusion backend: %s", ggml_backend_name(diffusion_backend)); + } else { + diffusion_backend = backend; + } ModelLoader model_loader; @@ -798,21 +929,24 @@ class StableDiffusionGGML { LOG_INFO("Using circular padding for convolutions"); } - bool clip_on_cpu = sd_ctx_params->keep_clip_on_cpu; - { - clip_backend = backend; - if (clip_on_cpu && !ggml_backend_is_cpu(backend)) { - LOG_INFO("CLIP: Using CPU backend"); - clip_backend = ggml_backend_cpu_init(); + if (!clip_backends_are_default) { + clip_backends.clear(); + for (auto clip_backend_name : clip_backend_names) { + auto clip_backend = init_named_backend(clip_backend_name); + LOG_INFO("CLIP: Using %s backend", ggml_backend_name(clip_backend)); + clip_backends.push_back(clip_backend); + } + } else { + clip_backends = {backend}; } if (sd_version_is_sd3(version)) { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends, offload_params_to_cpu, tensor_storage_map); - diffusion_model = std::make_shared(backend, - offload_params_to_cpu, - tensor_storage_map); + diffusion_model = std::make_shared(diffusion_backend, + offload_params_to_cpu, + tensor_storage_map); } else if (sd_version_is_flux(version)) { bool is_chroma = false; for (auto pair : tensor_storage_map) { @@ -831,53 +965,53 @@ class StableDiffusionGGML { "--chroma-disable-dit-mask as a workaround."); } - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, tensor_storage_map, sd_ctx_params->chroma_use_t5_mask, sd_ctx_params->chroma_t5_mask_pad); } else if (version == VERSION_OVIS_IMAGE) { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, tensor_storage_map, version, "", false); } else { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends, offload_params_to_cpu, tensor_storage_map); } - diffusion_model = std::make_shared(backend, + diffusion_model = std::make_shared(diffusion_backend, offload_params_to_cpu, tensor_storage_map, version, sd_ctx_params->chroma_use_dit_mask); } else if (sd_version_is_flux2(version)) { bool is_chroma = false; - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, tensor_storage_map, version); - diffusion_model = std::make_shared(backend, - offload_params_to_cpu, - tensor_storage_map, - version, - sd_ctx_params->chroma_use_dit_mask); + diffusion_model = std::make_shared(diffusion_backend, + offload_params_to_cpu, + tensor_storage_map, + version, + sd_ctx_params->chroma_use_dit_mask); } else if (sd_version_is_wan(version)) { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, tensor_storage_map, true, 1, true); - diffusion_model = std::make_shared(backend, - offload_params_to_cpu, - tensor_storage_map, - "model.diffusion_model", - version); + 
diffusion_model = std::make_shared(diffusion_backend, + offload_params_to_cpu, + tensor_storage_map, + "model.diffusion_model", + version); if (strlen(SAFE_STR(sd_ctx_params->high_noise_diffusion_model_path)) > 0) { - high_noise_diffusion_model = std::make_shared(backend, + high_noise_diffusion_model = std::make_shared(diffusion_backend, offload_params_to_cpu, tensor_storage_map, "model.high_noise_diffusion_model", @@ -886,7 +1020,11 @@ class StableDiffusionGGML { if (diffusion_model->get_desc() == "Wan2.1-I2V-14B" || diffusion_model->get_desc() == "Wan2.1-FLF2V-14B" || diffusion_model->get_desc() == "Wan2.1-I2V-1.3B") { - clip_vision = std::make_shared(backend, + if (!vision_backend) { + LOG_ERROR("WAN2.1 Need a vision_backend"); + return false; + } + clip_vision = std::make_shared(vision_backend, offload_params_to_cpu, tensor_storage_map); clip_vision->alloc_params_buffer(); @@ -897,56 +1035,56 @@ class StableDiffusionGGML { if (!vae_decode_only) { enable_vision = true; } - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, tensor_storage_map, version, "", enable_vision); - diffusion_model = std::make_shared(backend, - offload_params_to_cpu, - tensor_storage_map, - "model.diffusion_model", - version, - sd_ctx_params->qwen_image_zero_cond_t); + diffusion_model = std::make_shared(diffusion_backend, + offload_params_to_cpu, + tensor_storage_map, + "model.diffusion_model", + version, + sd_ctx_params->qwen_image_zero_cond_t); } else if (sd_version_is_anima(version)) { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, tensor_storage_map); diffusion_model = std::make_shared(backend, - offload_params_to_cpu, - tensor_storage_map, - "model.diffusion_model"); + offload_params_to_cpu, + tensor_storage_map, + "model.diffusion_model"); } else if (sd_version_is_z_image(version)) { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends[0], offload_params_to_cpu, tensor_storage_map, version); - diffusion_model = std::make_shared(backend, - offload_params_to_cpu, - tensor_storage_map, - "model.diffusion_model", - version); + diffusion_model = std::make_shared(diffusion_backend, + offload_params_to_cpu, + tensor_storage_map, + "model.diffusion_model", + version); } else { // SD1.x SD2.x SDXL std::map embbeding_map; for (uint32_t i = 0; i < sd_ctx_params->embedding_count; i++) { embbeding_map.emplace(SAFE_STR(sd_ctx_params->embeddings[i].name), SAFE_STR(sd_ctx_params->embeddings[i].path)); } if (strstr(SAFE_STR(sd_ctx_params->photo_maker_path), "v2")) { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends, offload_params_to_cpu, tensor_storage_map, embbeding_map, version, PM_VERSION_2); } else { - cond_stage_model = std::make_shared(clip_backend, + cond_stage_model = std::make_shared(clip_backends, offload_params_to_cpu, tensor_storage_map, embbeding_map, version); } - diffusion_model = std::make_shared(backend, + diffusion_model = std::make_shared(diffusion_backend, offload_params_to_cpu, tensor_storage_map, version); @@ -971,18 +1109,22 @@ class StableDiffusionGGML { high_noise_diffusion_model->get_param_tensors(tensors); } - if (sd_ctx_params->keep_vae_on_cpu && !ggml_backend_is_cpu(backend)) { - LOG_INFO("VAE Autoencoder: Using CPU backend"); - vae_backend = ggml_backend_cpu_init(); - } else { - vae_backend = backend; + vae_backend = backend; + if 
(!vae_backend_is_default) { + vae_backend = init_named_backend(vae_backend_name); + LOG_INFO("VAE Autoencoder: Using %s backend", ggml_backend_name(vae_backend)); + } + tae_backend = vae_backend; + if (tae_backend_name.length() > 0 && tae_backend_name != vae_backend_name) { + tae_backend = init_named_backend(tae_backend_name); + LOG_INFO("Tiny Autoencoder: Using %s backend", ggml_backend_name(tae_backend)); } auto create_tae = [&]() -> std::shared_ptr { if (sd_version_is_wan(version) || sd_version_is_qwen_image(version) || sd_version_is_anima(version)) { - return std::make_shared(vae_backend, + return std::make_shared(tae_backend, offload_params_to_cpu, tensor_storage_map, "decoder", @@ -990,7 +1132,7 @@ class StableDiffusionGGML { version); } else { - auto model = std::make_shared(vae_backend, + auto model = std::make_shared(tae_backend, offload_params_to_cpu, tensor_storage_map, "decoder.layers", @@ -1063,14 +1205,13 @@ class StableDiffusionGGML { } if (strlen(SAFE_STR(sd_ctx_params->control_net_path)) > 0) { - ggml_backend_t controlnet_backend = nullptr; - if (sd_ctx_params->keep_control_net_on_cpu && !ggml_backend_is_cpu(backend)) { - LOG_DEBUG("ControlNet: Using CPU backend"); - controlnet_backend = ggml_backend_cpu_init(); + if (!control_net_backend_is_default) { + control_net_backend = init_named_backend(control_net_backend_name); + LOG_INFO("ControlNet: Using %s backend", ggml_backend_name(control_net_backend)); } else { - controlnet_backend = backend; + control_net_backend = backend; } - control_net = std::make_shared(controlnet_backend, + control_net = std::make_shared(control_net_backend, offload_params_to_cpu, tensor_storage_map, version); @@ -1079,9 +1220,15 @@ class StableDiffusionGGML { control_net->set_conv2d_direct_enabled(true); } } - + pmid_backend = backend; + if (!pmid_backend_is_default) { + pmid_backend = init_named_backend(pmid_backend_name); + LOG_INFO("PhotoMaker: Using %s backend", ggml_backend_name(pmid_backend)); + } else { + pmid_backend = backend; + } if (strstr(SAFE_STR(sd_ctx_params->photo_maker_path), "v2")) { - pmid_model = std::make_shared(backend, + pmid_model = std::make_shared(pmid_backend, offload_params_to_cpu, tensor_storage_map, "pmid", @@ -1089,21 +1236,27 @@ class StableDiffusionGGML { PM_VERSION_2); LOG_INFO("using PhotoMaker Version 2"); } else { - pmid_model = std::make_shared(backend, + pmid_model = std::make_shared(pmid_backend, offload_params_to_cpu, tensor_storage_map, "pmid", version); } if (strlen(SAFE_STR(sd_ctx_params->photo_maker_path)) > 0) { - pmid_lora = std::make_shared("pmid", backend, sd_ctx_params->photo_maker_path, "", version); + pmid_lora = std::make_shared("pmid", diffusion_backend, sd_ctx_params->photo_maker_path, "", version); auto lora_tensor_filter = [&](const std::string& tensor_name) { if (starts_with(tensor_name, "lora.model")) { return true; } return false; }; - if (!pmid_lora->load_from_file(n_threads, lora_tensor_filter)) { + int n_th = n_threads; +#ifdef GGML_RPC + if (ggml_backend_is_rpc(diffusion_backend)) { + n_th = 1; // avoid multi-thread for loading to remote + } +#endif + if (!pmid_lora->load_from_file(n_th, lora_tensor_filter)) { LOG_WARN("load photomaker lora tensors from %s failed", sd_ctx_params->photo_maker_path); return false; } @@ -1195,7 +1348,22 @@ class StableDiffusionGGML { if (version == VERSION_SVD) { ignore_tensors.insert("conditioner.embedders.3"); } - bool success = model_loader.load_tensors(tensors, ignore_tensors, n_threads, sd_ctx_params->enable_mmap); + int n_th = n_threads; +#ifdef 
GGML_RPC + // TODO: maybe use a single thread only for the model parts that are on a remote backend? + bool is_any_clip_rpc = false; + for (auto& backend : clip_backends) { + if (ggml_backend_is_rpc(backend)) { + is_any_clip_rpc = true; + } + } + // These should be all of the backends that receive tensor data when model_loader.load_tensors() is called. + if (is_any_clip_rpc || ggml_backend_is_rpc(diffusion_backend) || ggml_backend_is_rpc(vae_backend) || ggml_backend_is_rpc(vision_backend) || ggml_backend_is_rpc(pmid_backend)) { + LOG_DEBUG("Using a single thread for tensor loading because an RPC backend is used"); + n_th = 1; // avoid multi-thread for loading to remote + } +#endif + bool success = model_loader.load_tensors(tensors, ignore_tensors, n_th, sd_ctx_params->enable_mmap); if (!success) { LOG_ERROR("load tensors from model loader failed"); ggml_free(ctx); @@ -1217,7 +1385,13 @@ } size_t control_net_params_mem_size = 0; if (control_net) { - if (!control_net->load_from_file(SAFE_STR(sd_ctx_params->control_net_path), n_threads)) { + int n_th = n_threads; +#ifdef GGML_RPC + if (ggml_backend_is_rpc(control_net_backend)) { + n_th = 1; // avoid multi-thread for loading to remote + } +#endif + if (!control_net->load_from_file(SAFE_STR(sd_ctx_params->control_net_path), n_th)) { return false; } control_net_params_mem_size = control_net->get_params_buffer_size(); @@ -1229,13 +1403,15 @@ size_t total_params_ram_size = 0; size_t total_params_vram_size = 0; - if (ggml_backend_is_cpu(clip_backend)) { + + // TODO: split by individual text encoders + if (ggml_backend_is_cpu(clip_backends[0])) { total_params_ram_size += clip_params_mem_size + pmid_params_mem_size; } else { total_params_vram_size += clip_params_mem_size + pmid_params_mem_size; } - if (ggml_backend_is_cpu(backend)) { + if (ggml_backend_is_cpu(diffusion_backend)) { total_params_ram_size += unet_params_mem_size; } else { total_params_vram_size += unet_params_mem_size; @@ -1261,7 +1437,8 @@ total_params_vram_size / 1024.0 / 1024.0, total_params_ram_size / 1024.0 / 1024.0, clip_params_mem_size / 1024.0 / 1024.0, - ggml_backend_is_cpu(clip_backend) ? "RAM" : "VRAM", + // TODO: split + ggml_backend_is_cpu(clip_backends[0]) ? "RAM" : "VRAM", unet_params_mem_size / 1024.0 / 1024.0, ggml_backend_is_cpu(backend) ? "RAM" : "VRAM", vae_params_mem_size / 1024.0 / 1024.0, @@ -1269,7 +1446,7 @@ control_net_params_mem_size / 1024.0 / 1024.0, ggml_backend_is_cpu(control_net_backend) ? "RAM" : "VRAM", pmid_params_mem_size / 1024.0 / 1024.0, - ggml_backend_is_cpu(clip_backend) ? "RAM" : "VRAM"); + ggml_backend_is_cpu(pmid_backend) ? "RAM" : "VRAM"); } // init denoiser @@ -1423,7 +1600,13 @@ class StableDiffusionGGML { LOG_DEBUG("high noise lora: %s", lora_path.c_str()); } auto lora = std::make_shared(lora_id, backend, lora_path, is_high_noise ? 
"model.high_noise_" : "", version); - if (!lora->load_from_file(n_threads, lora_tensor_filter)) { + int n_th = n_threads; +#ifdef GGML_RPC + if (ggml_backend_is_rpc(backend)) { + n_th = 1; // avoid multi-thread for loading to remote + } +#endif + if (!lora->load_from_file(n_th, lora_tensor_filter)) { LOG_WARN("load lora tensors from %s failed", lora_path.c_str()); return nullptr; } @@ -1459,15 +1642,59 @@ class StableDiffusionGGML { } for (auto& kv : lora_state_diff) { - int64_t t0 = ggml_time_ms(); + bool applied = false; + int64_t t0 = ggml_time_ms(); + auto lora_tensor_filter_diff = [&](const std::string& tensor_name) { + if (is_diffusion_model_name(tensor_name)) { + return true; + } + return false; + }; - auto lora = load_lora_model_from_file(kv.first, kv.second, backend); - if (!lora || lora->lora_tensors.empty()) { - continue; + LOG_INFO("applying lora to diffusion model"); + auto lora = load_lora_model_from_file(kv.first, kv.second, diffusion_backend, lora_tensor_filter_diff); + if (lora && !lora->lora_tensors.empty()) { + lora->apply(tensors, version, n_threads); + lora->free_params_buffer(); + applied = true; + } + + for (int i = 0; i < cond_stage_model->model_count; i++) { + auto lora_tensor_filter_cond = [&](const std::string& tensor_name) { + if (is_cond_stage_model_name(tensor_name)) { + return cond_stage_model->is_cond_stage_model_name_at_index(tensor_name, i); + } + return false; + }; + // TODO: split by model + LOG_INFO("applying lora to text encoder (%d)", i); + auto backend = cond_stage_model->get_params_backend_at_index(i); + lora = load_lora_model_from_file(kv.first, kv.second, backend, lora_tensor_filter_cond); + if (lora && !lora->lora_tensors.empty()) { + lora->apply(tensors, version, n_threads); + lora->free_params_buffer(); + applied = true; + } } - lora->apply(tensors, version, n_threads); - lora->free_params_buffer(); + auto lora_tensor_filter_first = [&](const std::string& tensor_name) { + if (is_first_stage_model_name(tensor_name)) { + return true; + } + return false; + }; + LOG_INFO("applying lora to first stage model"); + auto first_stage_backend = first_stage_model->get_params_backend(); + lora = load_lora_model_from_file(kv.first, kv.second, first_stage_backend, lora_tensor_filter_first); + if (lora && !lora->lora_tensors.empty()) { + lora->apply(tensors, version, n_threads); + lora->free_params_buffer(); + applied = true; + } + + if (!applied) { + continue; + } int64_t t1 = ggml_time_ms(); LOG_INFO("lora '%s' applied, taking %.2fs", kv.first.c_str(), (t1 - t0) * 1.0f / 1000); @@ -1508,23 +1735,27 @@ class StableDiffusionGGML { lora_state_diff.erase(iter); } } - cond_stage_lora_models = lora_models; - auto lora_tensor_filter = [&](const std::string& tensor_name) { - if (is_cond_stage_model_name(tensor_name)) { - return true; - } - return false; - }; - for (auto& kv : lora_state_diff) { - const std::string& lora_id = kv.first; - float multiplier = kv.second; + cond_stage_lora_models = lora_models; - auto lora = load_lora_model_from_file(lora_id, multiplier, clip_backend, lora_tensor_filter); - if (lora && !lora->lora_tensors.empty()) { - lora->preprocess_lora_tensors(tensors); - cond_stage_lora_models.push_back(lora); + for (int i = 0; i < cond_stage_model->model_count; i++) { + auto lora_tensor_filter_cond = [&](const std::string& tensor_name) { + if (is_cond_stage_model_name(tensor_name)) { + return cond_stage_model->is_cond_stage_model_name_at_index(tensor_name, i); + } + return false; + }; + for (auto& kv : lora_state_diff) { + const std::string& lora_id 
= kv.first; + float multiplier = kv.second; + auto backend = cond_stage_model->get_runtime_backend_at_index(i); + auto lora = load_lora_model_from_file(kv.first, kv.second, backend, lora_tensor_filter_cond); + if (lora && !lora->lora_tensors.empty()) { + lora->preprocess_lora_tensors(tensors); + cond_stage_lora_models.push_back(lora); + } } } + auto multi_lora_adapter = std::make_shared(cond_stage_lora_models); cond_stage_model->set_weight_adapter(multi_lora_adapter); } @@ -1551,7 +1782,7 @@ class StableDiffusionGGML { const std::string& lora_name = kv.first; float multiplier = kv.second; - auto lora = load_lora_model_from_file(lora_name, multiplier, backend, lora_tensor_filter); + auto lora = load_lora_model_from_file(lora_name, multiplier, diffusion_backend, lora_tensor_filter); if (lora && !lora->lora_tensors.empty()) { lora->preprocess_lora_tensors(tensors); diffusion_lora_models.push_back(lora); @@ -1871,9 +2102,9 @@ class StableDiffusionGGML { uint32_t dim = static_cast(latents->ne[ggml_n_dims(latents) - 1]); if (preview_mode == PREVIEW_PROJ) { - int patch_sz = 1; - const float(*latent_rgb_proj)[channel] = nullptr; - float* latent_rgb_bias = nullptr; + int patch_sz = 1; + const float (*latent_rgb_proj)[channel] = nullptr; + float* latent_rgb_bias = nullptr; if (dim == 128) { if (sd_version_is_flux2(version)) { @@ -2679,9 +2910,6 @@ void sd_ctx_params_init(sd_ctx_params_t* sd_ctx_params) { sd_ctx_params->lora_apply_mode = LORA_APPLY_AUTO; sd_ctx_params->offload_params_to_cpu = false; sd_ctx_params->enable_mmap = false; - sd_ctx_params->keep_clip_on_cpu = false; - sd_ctx_params->keep_control_net_on_cpu = false; - sd_ctx_params->keep_vae_on_cpu = false; sd_ctx_params->diffusion_flash_attn = false; sd_ctx_params->circular_x = false; sd_ctx_params->circular_y = false; @@ -2695,7 +2923,7 @@ char* sd_ctx_params_to_str(const sd_ctx_params_t* sd_ctx_params) { if (!buf) return nullptr; buf[0] = '\0'; - + // TODO devices snprintf(buf + strlen(buf), 4096 - strlen(buf), "model_path: %s\n" "clip_l_path: %s\n" @@ -2719,9 +2947,6 @@ char* sd_ctx_params_to_str(const sd_ctx_params_t* sd_ctx_params) { "sampler_rng_type: %s\n" "prediction: %s\n" "offload_params_to_cpu: %s\n" - "keep_clip_on_cpu: %s\n" - "keep_control_net_on_cpu: %s\n" - "keep_vae_on_cpu: %s\n" "flash_attn: %s\n" "diffusion_flash_attn: %s\n" "circular_x: %s\n" @@ -2751,9 +2976,6 @@ char* sd_ctx_params_to_str(const sd_ctx_params_t* sd_ctx_params) { sd_rng_type_name(sd_ctx_params->sampler_rng_type), sd_prediction_name(sd_ctx_params->prediction), BOOL_STR(sd_ctx_params->offload_params_to_cpu), - BOOL_STR(sd_ctx_params->keep_clip_on_cpu), - BOOL_STR(sd_ctx_params->keep_control_net_on_cpu), - BOOL_STR(sd_ctx_params->keep_vae_on_cpu), BOOL_STR(sd_ctx_params->flash_attn), BOOL_STR(sd_ctx_params->diffusion_flash_attn), BOOL_STR(sd_ctx_params->circular_x), diff --git a/src/t5.hpp b/src/t5.hpp index 5f8c99dda..0cf1409d4 100644 --- a/src/t5.hpp +++ b/src/t5.hpp @@ -362,7 +362,7 @@ class T5UniGramTokenizer { BuildTrie(&pieces); } - ~T5UniGramTokenizer(){}; + ~T5UniGramTokenizer() {}; std::string Normalize(const std::string& input) const { // Ref: https://github.com/huggingface/tokenizers/blob/1ff56c0c70b045f0cd82da1af9ac08cd4c7a6f9f/bindings/python/py_src/tokenizers/implementations/sentencepiece_unigram.py#L29 diff --git a/src/tokenize_util.cpp b/src/tokenize_util.cpp index 22cf8ae2e..68f78d790 100644 --- a/src/tokenize_util.cpp +++ b/src/tokenize_util.cpp @@ -10,7 +10,9 @@ bool is_number(char32_t ch) { } bool is_letter(char32_t ch) { - static const 
struct { char32_t start, end; } ranges[] = { + static const struct { + char32_t start, end; + } ranges[] = { {0x41, 0x5A}, {0x61, 0x7A}, {0xAA, 0xAA}, diff --git a/src/upscaler.cpp b/src/upscaler.cpp index 18e185d06..5a0d9562a 100644 --- a/src/upscaler.cpp +++ b/src/upscaler.cpp @@ -22,37 +22,20 @@ struct UpscalerGGML { bool load_from_file(const std::string& esrgan_path, bool offload_params_to_cpu, - int n_threads) { + int n_threads, + std::string device = "") { ggml_log_set(ggml_log_callback_default, nullptr); -#ifdef SD_USE_CUDA - LOG_DEBUG("Using CUDA backend"); - backend = ggml_backend_cuda_init(0); -#endif -#ifdef SD_USE_METAL - LOG_DEBUG("Using Metal backend"); - backend = ggml_backend_metal_init(); -#endif -#ifdef SD_USE_VULKAN - LOG_DEBUG("Using Vulkan backend"); - backend = ggml_backend_vk_init(0); -#endif -#ifdef SD_USE_OPENCL - LOG_DEBUG("Using OpenCL backend"); - backend = ggml_backend_opencl_init(); -#endif -#ifdef SD_USE_SYCL - LOG_DEBUG("Using SYCL backend"); - backend = ggml_backend_sycl_init(0); -#endif + device = sanitize_backend_name(device); + backend = init_named_backend(device); ModelLoader model_loader; if (!model_loader.init_from_file_and_convert_name(esrgan_path)) { LOG_ERROR("init model loader from file failed: '%s'", esrgan_path.c_str()); } model_loader.set_wtype_override(model_data_type); - if (!backend) { - LOG_DEBUG("Using CPU backend"); - backend = ggml_backend_cpu_init(); - } + // if (!backend) { + // LOG_DEBUG("Using CPU backend"); + // backend = ggml_backend_cpu_init(); + // } LOG_INFO("Upscaler weight type: %s", ggml_type_name(model_data_type)); esrgan_upscaler = std::make_shared(backend, offload_params_to_cpu, tile_size, model_loader.get_tensor_storage_map()); if (direct) { @@ -118,7 +101,8 @@ upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path_c_str, bool offload_params_to_cpu, bool direct, int n_threads, - int tile_size) { + int tile_size, + const char* device) { upscaler_ctx_t* upscaler_ctx = (upscaler_ctx_t*)malloc(sizeof(upscaler_ctx_t)); if (upscaler_ctx == nullptr) { return nullptr; @@ -130,7 +114,7 @@ upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path_c_str, return nullptr; } - if (!upscaler_ctx->upscaler->load_from_file(esrgan_path, offload_params_to_cpu, n_threads)) { + if (!upscaler_ctx->upscaler->load_from_file(esrgan_path, offload_params_to_cpu, n_threads, SAFE_STR(device))) { delete upscaler_ctx->upscaler; upscaler_ctx->upscaler = nullptr; free(upscaler_ctx); diff --git a/src/util.cpp b/src/util.cpp index a94cfd986..511a595ab 100644 --- a/src/util.cpp +++ b/src/util.cpp @@ -743,3 +743,15 @@ std::vector> parse_prompt_attention(const std::str return res; } + +// test if the backend is a specific one, e.g. "CUDA", "ROCm", "Vulkan" etc. +bool sd_backend_is(ggml_backend_t backend, const std::string& name) { + if (!backend) { + return false; + } + ggml_backend_dev_t dev = ggml_backend_get_device(backend); + if (!dev) + return false; + std::string dev_name = ggml_backend_dev_name(dev); + return dev_name.find(name) != std::string::npos; +} diff --git a/src/util.h b/src/util.h index 7dee7bf51..a77e05046 100644 --- a/src/util.h +++ b/src/util.h @@ -6,6 +6,7 @@ #include #include +#include "ggml-backend.h" #include "stable-diffusion.h" #define SAFE_STR(s) ((s) ? (s) : "") @@ -86,6 +87,9 @@ int sd_get_preview_interval(); bool sd_should_preview_denoised(); bool sd_should_preview_noisy(); +// test if the backend is a specific one, e.g. "CUDA", "ROCm", "Vulkan" etc. 
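+// Note: this is a substring match on the ggml device name, so e.g. "CUDA" also matches a device named "CUDA0".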
+bool sd_backend_is(ggml_backend_t backend, const std::string& name); + #define LOG_DEBUG(format, ...) log_printf(SD_LOG_DEBUG, __FILE__, __LINE__, format, ##__VA_ARGS__) #define LOG_INFO(format, ...) log_printf(SD_LOG_INFO, __FILE__, __LINE__, format, ##__VA_ARGS__) #define LOG_WARN(format, ...) log_printf(SD_LOG_WARN, __FILE__, __LINE__, format, ##__VA_ARGS__) diff --git a/src/wan.hpp b/src/wan.hpp index af8acbfda..82cd33390 100644 --- a/src/wan.hpp +++ b/src/wan.hpp @@ -1150,12 +1150,12 @@ namespace WAN { -0.0313f, -0.1649f, 0.0117f, 0.0723f, -0.2839f, -0.2083f, -0.0520f, 0.3748f, 0.0152f, 0.1957f, 0.1433f, -0.2944f, 0.3573f, -0.0548f, -0.1681f, -0.0667f}; latents_std_vec = { - 0.4765f, 1.0364f, 0.4514f, 1.1677f, 0.5313f, 0.4990f, 0.4818f, 0.5013f, - 0.8158f, 1.0344f, 0.5894f, 1.0901f, 0.6885f, 0.6165f, 0.8454f, 0.4978f, - 0.5759f, 0.3523f, 0.7135f, 0.6804f, 0.5833f, 1.4146f, 0.8986f, 0.5659f, - 0.7069f, 0.5338f, 0.4889f, 0.4917f, 0.4069f, 0.4999f, 0.6866f, 0.4093f, - 0.5709f, 0.6065f, 0.6415f, 0.4944f, 0.5726f, 1.2042f, 0.5458f, 1.6887f, - 0.3971f, 1.0600f, 0.3943f, 0.5537f, 0.5444f, 0.4089f, 0.7468f, 0.7744f}; + 0.4765f, 1.0364f, 0.4514f, 1.1677f, 0.5313f, 0.4990f, 0.4818f, 0.5013f, + 0.8158f, 1.0344f, 0.5894f, 1.0901f, 0.6885f, 0.6165f, 0.8454f, 0.4978f, + 0.5759f, 0.3523f, 0.7135f, 0.6804f, 0.5833f, 1.4146f, 0.8986f, 0.5659f, + 0.7069f, 0.5338f, 0.4889f, 0.4917f, 0.4069f, 0.4999f, 0.6866f, 0.4093f, + 0.5709f, 0.6065f, 0.6415f, 0.4944f, 0.5726f, 1.2042f, 0.5458f, 1.6887f, + 0.3971f, 1.0600f, 0.3943f, 0.5537f, 0.5444f, 0.4089f, 0.7468f, 0.7744f}; } } diff --git a/src/z_image.hpp b/src/z_image.hpp index 53a7cf824..e16e179db 100644 --- a/src/z_image.hpp +++ b/src/z_image.hpp @@ -31,10 +31,6 @@ namespace ZImage { : head_dim(head_dim), num_heads(num_heads), num_kv_heads(num_kv_heads), qk_norm(qk_norm) { blocks["qkv"] = std::make_shared(hidden_size, (num_heads + num_kv_heads * 2) * head_dim, false); float scale = 1.f; -#if GGML_USE_HIP - // Prevent NaN issues with certain ROCm setups - scale = 1.f / 16.f; -#endif blocks["out"] = std::make_shared(num_heads * head_dim, hidden_size, false, false, false, scale); if (qk_norm) { blocks["q_norm"] = std::make_shared(head_dim); @@ -52,6 +48,10 @@ namespace ZImage { auto qkv_proj = std::dynamic_pointer_cast(blocks["qkv"]); auto out_proj = std::dynamic_pointer_cast(blocks["out"]); + if (sd_backend_is(ctx->backend, "ROCm")) { + out_proj->set_scale(1.f / 16.f); + } + auto qkv = qkv_proj->forward(ctx, x); // [N, n_token, (num_heads + num_kv_heads*2)*head_dim] qkv = ggml_reshape_4d(ctx->ggml_ctx, qkv, head_dim, num_heads + num_kv_heads * 2, qkv->ne[1], qkv->ne[2]); // [N, n_token, num_heads + num_kv_heads*2, head_dim] @@ -115,9 +115,7 @@ namespace ZImage { bool force_prec_f32 = false; float scale = 1.f / 128.f; -#ifdef SD_USE_VULKAN - force_prec_f32 = true; -#endif + // The purpose of the scale here is to prevent NaN issues in certain situations. // For example, when using CUDA but the weights are k-quants. blocks["w2"] = std::make_shared(hidden_dim, dim, false, false, force_prec_f32, scale); @@ -129,6 +127,10 @@ namespace ZImage { auto w2 = std::dynamic_pointer_cast(blocks["w2"]); auto w3 = std::dynamic_pointer_cast(blocks["w3"]); + if (sd_backend_is(ctx->backend, "Vulkan")) { + w2->set_force_prec_f32(true); + } + auto x1 = w1->forward(ctx, x); auto x3 = w3->forward(ctx, x); x = ggml_swiglu_split(ctx->ggml_ctx, x1, x3);