Skip to content

Commit d2d6269

Browse files
authored
Install rpc-server when GGML_RPC is ON. (#17149)
1 parent 2fc392c commit d2d6269

File tree

2 files changed

+6
-0
lines changed

2 files changed

+6
-0
lines changed

.devops/nix/package.nix

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@
3434
rocmGpuTargets ? builtins.concatStringsSep ";" rocmPackages.clr.gpuTargets,
3535
enableCurl ? true,
3636
useVulkan ? false,
37+
useRpc ? false,
3738
llamaVersion ? "0.0.0", # Arbitrary version, substituted by the flake
3839

3940
# It's necessary to consistently use backendStdenv when building with CUDA support,
@@ -175,6 +176,7 @@ effectiveStdenv.mkDerivation (finalAttrs: {
175176
(cmakeBool "GGML_METAL" useMetalKit)
176177
(cmakeBool "GGML_VULKAN" useVulkan)
177178
(cmakeBool "GGML_STATIC" enableStatic)
179+
(cmakeBool "GGML_RPC" useRpc)
178180
]
179181
++ optionals useCuda [
180182
(

tools/rpc/CMakeLists.txt

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,3 +2,7 @@ set(TARGET rpc-server)
22
add_executable(${TARGET} rpc-server.cpp)
33
target_link_libraries(${TARGET} PRIVATE ggml)
44
target_compile_features(${TARGET} PRIVATE cxx_std_17)
5+
6+
if(LLAMA_TOOLS_INSTALL)
7+
install(TARGETS ${TARGET} RUNTIME)
8+
endif()

0 commit comments

Comments (0)