From e14d9d83ce04dfbdc602c9ddac3c1fdab621519f Mon Sep 17 00:00:00 2001
From: slaren
Date: Thu, 5 Jun 2025 19:01:31 +0200
Subject: [PATCH 01/13] llama : add thread safety test

llama : ignore main_gpu <= 0 if there are no GPUs

ggml-ci
---
 ci/run.sh                    |   2 +-
 src/llama.cpp                |   2 +-
 tests/CMakeLists.txt         |   2 +
 tests/test-thread-safety.cpp | 148 +++++++++++++++++++++++++++++++++++
 4 files changed, 152 insertions(+), 2 deletions(-)
 create mode 100644 tests/test-thread-safety.cpp

diff --git a/ci/run.sh b/ci/run.sh
index 2968a7dd48d42..94005570511b6 100755
--- a/ci/run.sh
+++ b/ci/run.sh
@@ -39,7 +39,7 @@ sd=`dirname $0`
 cd $sd/../
 SRC=`pwd`
 
-CMAKE_EXTRA="-DLLAMA_FATAL_WARNINGS=ON -DLLAMA_CURL=OFF"
+CMAKE_EXTRA="-DLLAMA_FATAL_WARNINGS=ON -DLLAMA_CURL=ON"
 
 if [ ! -z ${GG_BUILD_METAL} ]; then
     CMAKE_EXTRA="${CMAKE_EXTRA} -DGGML_METAL=ON -DGGML_METAL_USE_BF16=ON"
diff --git a/src/llama.cpp b/src/llama.cpp
index 2f06e0f8ce12d..7fe63d01dd1be 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -197,7 +197,7 @@ static struct llama_model * llama_model_load_from_file_impl(
     }
 
     // if using single GPU mode, remove all except the main GPU
-    if (params.split_mode == LLAMA_SPLIT_MODE_NONE) {
+    if (params.split_mode == LLAMA_SPLIT_MODE_NONE && !model->devices.empty() && params.main_gpu >= 0) {
         if (params.main_gpu < 0 || params.main_gpu >= (int)model->devices.size()) {
             LLAMA_LOG_ERROR("%s: invalid value for main_gpu: %d (available devices: %d)\n", __func__, params.main_gpu, (int)model->devices.size());
             llama_model_free(model);
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 83f7d1a4584f7..9562b031fa17a 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -151,6 +151,8 @@ llama_build_and_test(test-json-partial.cpp)
 llama_build_and_test(test-log.cpp)
 llama_build_and_test(test-regex-partial.cpp)
 
+llama_build_and_test(test-thread-safety.cpp ARGS -hf ggml-org/Qwen3-0.6B-GGUF:Q8_0 -ngl 99 -p "The meaning of life is" -n 128 -c 256 -ub 32 -np 4)
+
 # this fails on windows (github hosted runner) due to curl DLL not found (exit code 0xc0000135)
 if (NOT WIN32)
     llama_build_and_test(test-arg-parser.cpp)
diff --git a/tests/test-thread-safety.cpp b/tests/test-thread-safety.cpp
new file mode 100644
index 0000000000000..1b6497f9d3e72
--- /dev/null
+++ b/tests/test-thread-safety.cpp
@@ -0,0 +1,148 @@
+// thread safety test
+// - Loads a copy of the same model on each GPU, plus a copy on the CPU
+// - Creates n_parallel (--parallel) contexts per model
+// - Runs inference in parallel on each context
+
+#include <thread>
+#include <vector>
+#include <atomic>
+#include "llama.h"
+#include "arg.h"
+#include "common.h"
+#include "log.h"
+#include "sampling.h"
+
+int main(int argc, char ** argv) {
+    common_params params;
+
+    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
+        return 1;
+    }
+
+    common_init();
+
+    llama_backend_init();
+    llama_numa_init(params.numa);
+
+    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
+        if (level == GGML_LOG_LEVEL_ERROR) {
+            common_log_add(common_log_main(), level, "%s", text);
+        }
+    }, NULL);
+
+    auto mparams = common_model_params_to_llama(params);
+    auto cparams = common_context_params_to_llama(params);
+
+    int dev_count = ggml_backend_dev_count();
+    int gpu_dev_count = 0;
+    for (int i = 0; i < dev_count; ++i) {
+        auto * dev = ggml_backend_dev_get(i);
+        if (dev && ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_GPU) {
+            gpu_dev_count++;
+        }
+    }
+    const int num_models = gpu_dev_count + 1; // GPUs + 1 CPU model
+    //const int num_models = std::max(1, gpu_dev_count);
+    const int num_contexts = std::max(1, params.n_parallel);
+
+    struct model_context {
+        llama_model_ptr model;
+        std::vector<llama_context_ptr> contexts;
+        std::vector<std::unique_ptr<common_sampler, decltype(&common_sampler_free)>> samplers;
+    };
+
+    std::vector<model_context> models;
+    std::vector<std::thread> threads;
+    std::atomic<bool> failed = false;
+
+    for (int m = 0; m < num_models; ++m) {
+        model_context this_model;
+
+        mparams.split_mode = LLAMA_SPLIT_MODE_NONE;
+        mparams.main_gpu = m < gpu_dev_count ? m : -1;
+
+        llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams);
+        if (model == NULL) {
+            LOG_ERR("%s: failed to load model '%s'\n", __func__, params.model.path.c_str());
+            return 1;
+        }
+
+        this_model.model.reset(model);
+
+        for (int c = 0; c < num_contexts; ++c) {
+            LOG_INF("Creating context %d/%d for model %d/%d\n", c + 1, num_contexts, m + 1, num_models);
+            llama_context * ctx = llama_init_from_model(model, cparams);
+            if (ctx == NULL) {
+                LOG_ERR("%s: failed to create context\n", __func__);
+                return 1;
+            }
+            this_model.contexts.emplace_back(ctx);
+
+            common_sampler * sampler = common_sampler_init(model, params.sampling);
+            if (sampler == NULL) {
+                LOG_ERR("%s: failed to create sampler\n", __func__);
+                return 1;
+            }
+            this_model.samplers.emplace_back(sampler, common_sampler_free);
+
+            threads.emplace_back([model, ctx, sampler, &params, &failed, m, c, num_models, num_contexts]() {
+                llama_batch batch = {};
+                {
+                    auto prompt = common_tokenize(ctx, params.prompt, true);
+                    if (prompt.empty()) {
+                        LOG_ERR("failed to tokenize prompt\n");
+                        failed.store(true);
+                        return;
+                    }
+                    batch = llama_batch_get_one(prompt.data(), prompt.size());
+                    if (llama_decode(ctx, batch)) {
+                        LOG_ERR("failed to decode prompt\n");
+                        failed.store(true);
+                        return;
+                    }
+                }
+
+                const auto * vocab = llama_model_get_vocab(model);
+                std::string result = params.prompt;
+
+                for (int i = 0; i < params.n_predict; i++) {
+                    llama_token token;
+                    if (batch.n_tokens > 0) {
+                        token = common_sampler_sample(sampler, ctx, batch.n_tokens - 1);
+                    } else {
+                        token = llama_vocab_bos(vocab);
+                    }
+
+                    if (llama_vocab_is_eog(vocab, token)) {
+                        break;
+                    }
+                    result += common_token_to_piece(ctx, token);
+
+                    batch = llama_batch_get_one(&token, 1);
+                    if (llama_decode(ctx, batch)) {
+                        LOG_ERR("failed to decode\n");
+                        failed.store(true);
+                        return;
+                    }
+                }
+
+                LOG_INF("Model %d/%d, Context %d/%d: Result: '%s'\n", m + 1, num_models, c + 1, num_contexts, result.c_str());
+            });
+
+        }
+
+        models.emplace_back(std::move(this_model));
+    }
+
+    for (auto & thread : threads) {
+        thread.join();
+    }
+
+    if (failed) {
+        LOG_ERR("One or more threads failed.\n");
+        return 1;
+    }
+
+    LOG_INF("All threads completed successfully.\n");
+    return 0;
+}
From bf4530006335c5a1349391cecbe0212cf168bfb4 Mon Sep 17 00:00:00 2001
From: slaren
Date: Fri, 6 Jun 2025 11:49:53 +0200
Subject: [PATCH 02/13] use smaller stories15M-q4_0.gguf model

ggml-ci
---
 tests/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 9562b031fa17a..a2ba312d78a4c 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -151,7 +151,7 @@ llama_build_and_test(test-json-partial.cpp)
 llama_build_and_test(test-log.cpp)
 llama_build_and_test(test-regex-partial.cpp)
 
-llama_build_and_test(test-thread-safety.cpp ARGS -hf ggml-org/Qwen3-0.6B-GGUF:Q8_0 -ngl 99 -p "The meaning of life is" -n 128 -c 256 -ub 32 -np 4)
+llama_build_and_test(test-thread-safety.cpp ARGS -hf ggml-org/models -hff tinyllamas/stories15M-q4_0.gguf -ngl 99 -p "The meaning of life is" -n 128 -c 256 -ub 32 -np 4)
 
 # this fails on windows (github hosted runner) due to curl DLL not found (exit code 0xc0000135)
 if (NOT WIN32)
From b046f0ce11509d47282e3fd50d45fbe650ceefd4 Mon Sep 17 00:00:00 2001
From: slaren
Date: Fri, 6 Jun 2025 14:12:57 +0200
Subject: [PATCH 03/13] test

ggml-ci
---
 .github/workflows/build.yml  |  1 +
 common/common.cpp            | 16 ++++++++++++----
 tests/test-thread-safety.cpp | 12 +++++++-----
 3 files changed, 20 insertions(+), 9 deletions(-)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 867a589ce1648..03a95ef5bd641 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -777,6 +777,7 @@ jobs:
           cmake -S . -B build ${{ matrix.defines }} `
             -DCURL_LIBRARY="$env:CURL_PATH/lib/libcurl.dll.a" -DCURL_INCLUDE_DIR="$env:CURL_PATH/include"
           cmake --build build --config Release -j ${env:NUMBER_OF_PROCESSORS}
+          cp $env:CURL_PATH/bin/libcurl-*.dll build/bin/Release
 
       - name: Add libopenblas.dll
         id: add_libopenblas_dll
diff --git a/common/common.cpp b/common/common.cpp
index 4cc40ed8b37a4..1874d4a907389 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -767,6 +767,9 @@ bool fs_validate_filename(const std::string & filename) {
     return true;
 }
 
+#include <iostream>
+
+
 // returns true if successful, false otherwise
 bool fs_create_directory_with_parents(const std::string & path) {
 #ifdef _WIN32
@@ -784,11 +787,18 @@ bool fs_create_directory_with_parents(const std::string & path) {
     // process path from front to back, procedurally creating directories
     while ((pos_slash = path.find('\\', pos_slash)) != std::string::npos) {
         const std::wstring subpath = wpath.substr(0, pos_slash);
-        const wchar_t * test = subpath.c_str();
+        pos_slash += 1;
+        // skip the drive letter, in some systems it can return an access denied error
+        if (subpath.length() == 2 && subpath[1] == ':') {
+            continue;
+        }
+
+        const bool success = CreateDirectoryW(subpath.c_str(), NULL);
 
-        const bool success = CreateDirectoryW(test, NULL);
+        std::wcout << "CreateDirectoryW " << subpath << " returned: " << (success ? "true" : "false") << std::endl;
         if (!success) {
             const DWORD error = GetLastError();
+            std::wcout << "GetLastError returned: " << error << std::endl;
 
             // if the path already exists, ensure that it's a directory
             if (error == ERROR_ALREADY_EXISTS) {
@@ -800,8 +810,6 @@ bool fs_create_directory_with_parents(const std::string & path) {
                 return false;
             }
         }
-
-        pos_slash += 1;
     }
 
     return true;
diff --git a/tests/test-thread-safety.cpp b/tests/test-thread-safety.cpp
index 1b6497f9d3e72..2a08f27ffabd4 100644
--- a/tests/test-thread-safety.cpp
+++ b/tests/test-thread-safety.cpp
@@ -24,11 +24,13 @@ int main(int argc, char ** argv) {
     llama_backend_init();
     llama_numa_init(params.numa);
 
-    llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
-        if (level == GGML_LOG_LEVEL_ERROR) {
-            common_log_add(common_log_main(), level, "%s", text);
-        }
-    }, NULL);
+    LOG_INF("%s\n", common_params_get_system_info(params).c_str());
+
+    //llama_log_set([](ggml_log_level level, const char * text, void * /*user_data*/) {
+    //    if (level == GGML_LOG_LEVEL_ERROR) {
+    //        common_log_add(common_log_main(), level, "%s", text);
+    //    }
+    //}, NULL);
 
     auto mparams = common_model_params_to_llama(params);
     auto cparams = common_context_params_to_llama(params);
From 169774ad9220e9193c5cdaaa866b26ad2da1df89 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 6 Jun 2025 16:23:11 +0300
Subject: [PATCH 04/13] llamafile : remove global state

ggml-ci
---
 ggml/src/ggml-cpu/llamafile/sgemm.cpp | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/ggml/src/ggml-cpu/llamafile/sgemm.cpp b/ggml/src/ggml-cpu/llamafile/sgemm.cpp
index 1d46158f928c4..acef51a9c226f 100644
--- a/ggml/src/ggml-cpu/llamafile/sgemm.cpp
+++ b/ggml/src/ggml-cpu/llamafile/sgemm.cpp
@@ -53,7 +53,6 @@
 #include "ggml-cpu-impl.h"
 #include "ggml-quants.h"
 
-#include <atomic>
 
 #include
 #include
@@ -394,8 +393,6 @@ class tinyBLAS {
 
     template <int RM, int RN, int BM>
     NOINLINE void gemm(int64_t m, int64_t n, int64_t BN) {
-        static std::atomic<int64_t> current_chunk;
-
         GGML_ASSERT(m % (RM * BM) == 0);
         const int64_t ytiles = m / (RM * BM);
         const int64_t xtiles = (n + RN -1) / RN;
@@ -409,8 +406,6 @@ class tinyBLAS {
 
         if (params->ith == 0) {
             GGML_ASSERT( jj_BN * SIZE_BN + (NB_BN - jj_BN) * (SIZE_BN - 1) == xtiles);
-            // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start.
-            std::atomic_store_explicit(&current_chunk, (int64_t)params->nth, std::memory_order_relaxed);
         }
         ggml_barrier(params->threadpool);
 
@@ -439,8 +434,7 @@ class tinyBLAS {
                 GGML_ASSERT(jj == jj2);
             }
 
-            // next step.
-            job = std::atomic_fetch_add_explicit(&current_chunk, (int64_t)1, std::memory_order_relaxed);
+            job += params->nth;
         }
 
         ggml_barrier(params->threadpool);
From 8ef4b950b4d75d9570c896a1cdcd8c4bb46c21bd Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 6 Jun 2025 16:50:18 +0300
Subject: [PATCH 05/13] cont : reuse current_chunk from ggml_threadpool

ggml-ci
---
 ggml/src/ggml-cpu/ggml-cpu-impl.h     | 3 +++
 ggml/src/ggml-cpu/ggml-cpu.c          | 8 ++++++++
 ggml/src/ggml-cpu/llamafile/sgemm.cpp | 4 +++-
 3 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/ggml/src/ggml-cpu/ggml-cpu-impl.h b/ggml/src/ggml-cpu/ggml-cpu-impl.h
index b3f1b5ca79092..2232cf38b03ad 100644
--- a/ggml/src/ggml-cpu/ggml-cpu-impl.h
+++ b/ggml/src/ggml-cpu/ggml-cpu-impl.h
@@ -503,6 +503,9 @@ static __m256 __lasx_xvreplfr2vr_s(const float val) {
 // TODO: move to ggml-threading
 void ggml_barrier(struct ggml_threadpool * tp);
 
+void ggml_threadpool_chunk_set(struct ggml_threadpool * threadpool, int value);
+int ggml_threadpool_chunk_add(struct ggml_threadpool * threadpool, int value);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c
index c7426df2b851b..6344795d90775 100644
--- a/ggml/src/ggml-cpu/ggml-cpu.c
+++ b/ggml/src/ggml-cpu/ggml-cpu.c
@@ -559,6 +559,14 @@ void ggml_barrier(struct ggml_threadpool * tp) {
 #endif
 }
 
+void ggml_threadpool_chunk_set(struct ggml_threadpool * threadpool, int value) {
+    atomic_store_explicit(&threadpool->current_chunk, value, memory_order_release);
+}
+
+int ggml_threadpool_chunk_add(struct ggml_threadpool * threadpool, int value) {
+    return atomic_fetch_add_explicit(&threadpool->current_chunk, value, memory_order_acq_rel);
+}
+
 #if defined(__gnu_linux__)
 static cpu_set_t ggml_get_numa_affinity(void) {
     cpu_set_t cpuset;
diff --git a/ggml/src/ggml-cpu/llamafile/sgemm.cpp b/ggml/src/ggml-cpu/llamafile/sgemm.cpp
index acef51a9c226f..1c545f803327b 100644
--- a/ggml/src/ggml-cpu/llamafile/sgemm.cpp
+++ b/ggml/src/ggml-cpu/llamafile/sgemm.cpp
@@ -406,6 +406,8 @@ class tinyBLAS {
 
         if (params->ith == 0) {
             GGML_ASSERT( jj_BN * SIZE_BN + (NB_BN - jj_BN) * (SIZE_BN - 1) == xtiles);
+            // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start.
+            ggml_threadpool_chunk_set(params->threadpool, params->nth);
         }
         ggml_barrier(params->threadpool);
 
@@ -434,7 +436,7 @@ class tinyBLAS {
                 GGML_ASSERT(jj == jj2);
             }
 
-            job += params->nth;
+            job = ggml_threadpool_chunk_add(params->threadpool, 1);
         }
 
         ggml_barrier(params->threadpool);
From 03da6c83f54a293a7447c88ae357860418b71e0e Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 6 Jun 2025 17:00:22 +0300
Subject: [PATCH 06/13] cont : memory order relaxed

ggml-ci
---
 ggml/src/ggml-cpu/ggml-cpu-impl.h | 4 ++--
 ggml/src/ggml-cpu/ggml-cpu.c      | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/ggml/src/ggml-cpu/ggml-cpu-impl.h b/ggml/src/ggml-cpu/ggml-cpu-impl.h
index 2232cf38b03ad..bbd93c0ef66fe 100644
--- a/ggml/src/ggml-cpu/ggml-cpu-impl.h
+++ b/ggml/src/ggml-cpu/ggml-cpu-impl.h
@@ -503,8 +503,8 @@ static __m256 __lasx_xvreplfr2vr_s(const float val) {
 // TODO: move to ggml-threading
 void ggml_barrier(struct ggml_threadpool * tp);
 
-void ggml_threadpool_chunk_set(struct ggml_threadpool * threadpool, int value);
-int ggml_threadpool_chunk_add(struct ggml_threadpool * threadpool, int value);
+void ggml_threadpool_chunk_set(struct ggml_threadpool * tp, int value);
+int ggml_threadpool_chunk_add(struct ggml_threadpool * tp, int value);
 
 #ifdef __cplusplus
 }
diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c
index 6344795d90775..79403dbd991ee 100644
--- a/ggml/src/ggml-cpu/ggml-cpu.c
+++ b/ggml/src/ggml-cpu/ggml-cpu.c
@@ -559,12 +559,12 @@ void ggml_barrier(struct ggml_threadpool * tp) {
 #endif
 }
 
-void ggml_threadpool_chunk_set(struct ggml_threadpool * threadpool, int value) {
-    atomic_store_explicit(&threadpool->current_chunk, value, memory_order_release);
+void ggml_threadpool_chunk_set(struct ggml_threadpool * tp, int value) {
+    atomic_store_explicit(&tp->current_chunk, value, memory_order_relaxed);
 }
 
-int ggml_threadpool_chunk_add(struct ggml_threadpool * threadpool, int value) {
-    return atomic_fetch_add_explicit(&threadpool->current_chunk, value, memory_order_acq_rel);
+int ggml_threadpool_chunk_add(struct ggml_threadpool * tp, int value) {
+    return atomic_fetch_add_explicit(&tp->current_chunk, value, memory_order_relaxed);
 }
 
 #if defined(__gnu_linux__)
From 00ad1774fc5128b3563bb015e083b663f24250d0 Mon Sep 17 00:00:00 2001
From: slaren
Date: Fri, 6 Jun 2025 20:37:25 +0200
Subject: [PATCH 07/13] cleanup

---
 common/common.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index 1874d4a907389..6f6f2dedb7cc9 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -787,7 +787,9 @@ bool fs_create_directory_with_parents(const std::string & path) {
     // process path from front to back, procedurally creating directories
     while ((pos_slash = path.find('\\', pos_slash)) != std::string::npos) {
         const std::wstring subpath = wpath.substr(0, pos_slash);
+
         pos_slash += 1;
+
         // skip the drive letter, in some systems it can return an access denied error
         if (subpath.length() == 2 && subpath[1] == ':') {
             continue;
@@ -795,10 +797,8 @@ bool fs_create_directory_with_parents(const std::string & path) {
 
         const bool success = CreateDirectoryW(subpath.c_str(), NULL);
 
-        std::wcout << "CreateDirectoryW " << subpath << " returned: " << (success ? "true" : "false") << std::endl;
         if (!success) {
             const DWORD error = GetLastError();
-            std::wcout << "GetLastError returned: " << error << std::endl;
 
             // if the path already exists, ensure that it's a directory
             if (error == ERROR_ALREADY_EXISTS) {
From 292c4e7eaf43e25de70876210db80e453d37ff10 Mon Sep 17 00:00:00 2001
From: slaren
Date: Fri, 6 Jun 2025 20:52:19 +0200
Subject: [PATCH 08/13] move context creation to the threads to test it too

ggml-ci
---
 tests/test-thread-safety.cpp | 58 +++++++++++++++---------------------
 1 file changed, 24 insertions(+), 34 deletions(-)

diff --git a/tests/test-thread-safety.cpp b/tests/test-thread-safety.cpp
index 2a08f27ffabd4..2114b1b7f6a98 100644
--- a/tests/test-thread-safety.cpp
+++ b/tests/test-thread-safety.cpp
@@ -47,19 +47,11 @@ int main(int argc, char ** argv) {
     //const int num_models = std::max(1, gpu_dev_count);
     const int num_contexts = std::max(1, params.n_parallel);
 
-    struct model_context {
-        llama_model_ptr model;
-        std::vector<llama_context_ptr> contexts;
-        std::vector<std::unique_ptr<common_sampler, decltype(&common_sampler_free)>> samplers;
-    };
-
-    std::vector<model_context> models;
+    std::vector<llama_model_ptr> models;
     std::vector<std::thread> threads;
     std::atomic<bool> failed = false;
 
     for (int m = 0; m < num_models; ++m) {
-        model_context this_model;
-
         mparams.split_mode = LLAMA_SPLIT_MODE_NONE;
         mparams.main_gpu = m < gpu_dev_count ? m : -1;
 
@@ -69,35 +61,36 @@ int main(int argc, char ** argv) {
             return 1;
         }
 
-        this_model.model.reset(model);
+        models.emplace_back(model);
 
         for (int c = 0; c < num_contexts; ++c) {
-            LOG_INF("Creating context %d/%d for model %d/%d\n", c + 1, num_contexts, m + 1, num_models);
-            llama_context * ctx = llama_init_from_model(model, cparams);
-            if (ctx == NULL) {
-                LOG_ERR("%s: failed to create context\n", __func__);
-                return 1;
-            }
-            this_model.contexts.emplace_back(ctx);
-
-            common_sampler * sampler = common_sampler_init(model, params.sampling);
-            if (sampler == NULL) {
-                LOG_ERR("%s: failed to create sampler\n", __func__);
-                return 1;
-            }
-            this_model.samplers.emplace_back(sampler, common_sampler_free);
-
-            threads.emplace_back([model, ctx, sampler, &params, &failed, m, c, num_models, num_contexts]() {
+            threads.emplace_back([&, m, c, model]() {
+                LOG_INF("Creating context %d/%d for model %d/%d\n", c + 1, num_contexts, m + 1, num_models);
+
+                llama_context_ptr ctx { llama_init_from_model(model, cparams) };
+                if (ctx == NULL) {
+                    LOG_ERR("failed to create context\n");
+                    failed.store(true);
+                    return;
+                }
+
+                std::unique_ptr<common_sampler, decltype(&common_sampler_free)> sampler { common_sampler_init(model, params.sampling), common_sampler_free };
+                if (sampler == NULL) {
+                    LOG_ERR("failed to create sampler\n");
+                    failed.store(true);
+                    return;
+                }
+
                 llama_batch batch = {};
                 {
-                    auto prompt = common_tokenize(ctx, params.prompt, true);
+                    auto prompt = common_tokenize(ctx.get(), params.prompt, true);
                     if (prompt.empty()) {
                         LOG_ERR("failed to tokenize prompt\n");
                         failed.store(true);
                         return;
                     }
                     batch = llama_batch_get_one(prompt.data(), prompt.size());
-                    if (llama_decode(ctx, batch)) {
+                    if (llama_decode(ctx.get(), batch)) {
                         LOG_ERR("failed to decode prompt\n");
                         failed.store(true);
                         return;
@@ -110,7 +103,7 @@ int main(int argc, char ** argv) {
                 for (int i = 0; i < params.n_predict; i++) {
                     llama_token token;
                     if (batch.n_tokens > 0) {
-                        token = common_sampler_sample(sampler, ctx, batch.n_tokens - 1);
+                        token = common_sampler_sample(sampler.get(), ctx.get(), batch.n_tokens - 1);
                     } else {
                         token = llama_vocab_bos(vocab);
                    }
@@ -118,10 +111,10 @@ int main(int argc, char ** argv) {
                     if (llama_vocab_is_eog(vocab, token)) {
                         break;
                     }
-                    result += common_token_to_piece(ctx, token);
+                    result +=
common_token_to_piece(ctx.get(), token); batch = llama_batch_get_one(&token, 1); - if (llama_decode(ctx, batch)) { + if (llama_decode(ctx.get(), batch)) { LOG_ERR("failed to decode\n"); failed.store(true); return; @@ -130,10 +123,7 @@ int main(int argc, char ** argv) { LOG_INF("Model %d/%d, Context %d/%d: Result: '%s'\n", m + 1, num_models, c + 1, num_contexts, result.c_str()); }); - } - - models.emplace_back(std::move(this_model)); } for (auto & thread : threads) { From 2158fd0a4c75b6446731462bca821e6ea9e43d6a Mon Sep 17 00:00:00 2001 From: slaren Date: Fri, 6 Jun 2025 21:13:47 +0200 Subject: [PATCH 09/13] load all models first --- tests/test-thread-safety.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/test-thread-safety.cpp b/tests/test-thread-safety.cpp index 2114b1b7f6a98..d7ec5e2fbdaa1 100644 --- a/tests/test-thread-safety.cpp +++ b/tests/test-thread-safety.cpp @@ -62,7 +62,10 @@ int main(int argc, char ** argv) { } models.emplace_back(model); + } + for (int m = 0; m < num_models; ++m) { + auto * model = models[m].get(); for (int c = 0; c < num_contexts; ++c) { threads.emplace_back([&, m, c, model]() { LOG_INF("Creating context %d/%d for model %d/%d\n", c + 1, num_contexts, m + 1, num_models); From 9381f4ea21a57ccd9b76d60079265075dc22880d Mon Sep 17 00:00:00 2001 From: slaren Date: Mon, 9 Jun 2025 16:39:23 +0200 Subject: [PATCH 10/13] disable vulkan tests --- .github/workflows/build.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 03a95ef5bd641..1104111392ad9 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -304,10 +304,12 @@ jobs: - name: Test id: cmake_test + # disabled due to failing thread safety test: https://github.com/ggml-org/llama.cpp/pull/14035 + if: false run: | cd build # This is using llvmpipe and runs slower than other backends - ctest -L main --verbose --timeout 3600 + # ctest -L main --verbose --timeout 3600 ubuntu-22-cmake-hip: runs-on: ubuntu-22.04 From f422a3ebf736cf7f5ad9be5ed2646800a0cf8f33 Mon Sep 17 00:00:00 2001 From: slaren Date: Mon, 16 Jun 2025 13:46:28 +0200 Subject: [PATCH 11/13] Revert "disable vulkan tests" This reverts commit 9381f4ea21a57ccd9b76d60079265075dc22880d. 
---
 .github/workflows/build.yml | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 94f6b43df6140..2a662c08835da 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -304,12 +304,10 @@ jobs:
 
       - name: Test
         id: cmake_test
-        # disabled due to failing thread safety test: https://github.com/ggml-org/llama.cpp/pull/14035
-        if: false
        run: |
           cd build
           # This is using llvmpipe and runs slower than other backends
-          # ctest -L main --verbose --timeout 3600
+          ctest -L main --verbose --timeout 3600
 
   ubuntu-22-cmake-hip:
     runs-on: ubuntu-22.04
From bbd8b668bdb4bd0b54a8a4ea326c18222bd2691b Mon Sep 17 00:00:00 2001
From: slaren
Date: Mon, 16 Jun 2025 14:55:08 +0200
Subject: [PATCH 12/13] llama : better LLAMA_SPLIT_MODE_NONE logic

when main_gpu < 0 GPU devices are not used
---
 src/llama.cpp | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index 7fe63d01dd1be..34906cdb62844 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -197,15 +197,19 @@ static struct llama_model * llama_model_load_from_file_impl(
     }
 
     // if using single GPU mode, remove all except the main GPU
-    if (params.split_mode == LLAMA_SPLIT_MODE_NONE && !model->devices.empty() && params.main_gpu >= 0) {
-        if (params.main_gpu < 0 || params.main_gpu >= (int)model->devices.size()) {
-            LLAMA_LOG_ERROR("%s: invalid value for main_gpu: %d (available devices: %d)\n", __func__, params.main_gpu, (int)model->devices.size());
-            llama_model_free(model);
-            return nullptr;
+    if (params.split_mode == LLAMA_SPLIT_MODE_NONE) {
+        if (params.main_gpu < 0) {
+            model->devices.clear();
+        } else {
+            if (params.main_gpu >= (int)model->devices.size()) {
+                LLAMA_LOG_ERROR("%s: invalid value for main_gpu: %d (available devices: %zu)\n", __func__, params.main_gpu, model->devices.size());
+                llama_model_free(model);
+                return nullptr;
+            }
+            ggml_backend_dev_t main_gpu = model->devices[params.main_gpu];
+            model->devices.clear();
+            model->devices.push_back(main_gpu);
         }
-        ggml_backend_dev_t main_gpu = model->devices[params.main_gpu];
-        model->devices.clear();
-        model->devices.push_back(main_gpu);
     }
 
     for (auto * dev : model->devices) {
From 5b10edfe34390d546589a6e752e341669c76aa64 Mon Sep 17 00:00:00 2001
From: slaren
Date: Mon, 16 Jun 2025 15:05:48 +0200
Subject: [PATCH 13/13] add CPU only test + default split test

ggml-ci
---
 tests/test-thread-safety.cpp | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)

diff --git a/tests/test-thread-safety.cpp b/tests/test-thread-safety.cpp
index d7ec5e2fbdaa1..d525b7430f9d9 100644
--- a/tests/test-thread-safety.cpp
+++ b/tests/test-thread-safety.cpp
@@ -32,7 +32,6 @@ int main(int argc, char ** argv) {
     //    }
     //}, NULL);
 
-    auto mparams = common_model_params_to_llama(params);
     auto cparams = common_context_params_to_llama(params);
 
     int dev_count = ggml_backend_dev_count();
@@ -43,7 +42,7 @@ int main(int argc, char ** argv) {
             gpu_dev_count++;
         }
     }
-    const int num_models = gpu_dev_count + 1; // GPUs + 1 CPU model
+    const int num_models = gpu_dev_count + 1 + 1; // GPUs + 1 CPU model + 1 layer split
     //const int num_models = std::max(1, gpu_dev_count);
     const int num_contexts = std::max(1, params.n_parallel);
 
@@ -52,8 +51,17 @@ int main(int argc, char ** argv) {
     std::atomic<bool> failed = false;
 
     for (int m = 0; m < num_models; ++m) {
-        mparams.split_mode = LLAMA_SPLIT_MODE_NONE;
-        mparams.main_gpu = m < gpu_dev_count ?
m : -1; + auto mparams = common_model_params_to_llama(params); + + if (m < gpu_dev_count) { + mparams.split_mode = LLAMA_SPLIT_MODE_NONE; + mparams.main_gpu = m; + } else if (m == gpu_dev_count) { + mparams.split_mode = LLAMA_SPLIT_MODE_NONE; + mparams.main_gpu = -1; // CPU model + } else { + mparams.split_mode = LLAMA_SPLIT_MODE_LAYER;; + } llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams); if (model == NULL) { @@ -111,20 +119,21 @@ int main(int argc, char ** argv) { token = llama_vocab_bos(vocab); } + result += common_token_to_piece(ctx.get(), token); + if (llama_vocab_is_eog(vocab, token)) { break; } - result += common_token_to_piece(ctx.get(), token); batch = llama_batch_get_one(&token, 1); if (llama_decode(ctx.get(), batch)) { - LOG_ERR("failed to decode\n"); + LOG_ERR("Model %d/%d, Context %d/%d: failed to decode\n", m + 1, num_models, c + 1, num_contexts); failed.store(true); return; } } - LOG_INF("Model %d/%d, Context %d/%d: Result: '%s'\n", m + 1, num_models, c + 1, num_contexts, result.c_str()); + LOG_INF("Model %d/%d, Context %d/%d: %s\n\n", m + 1, num_models, c + 1, num_contexts, result.c_str()); }); } } @@ -138,6 +147,6 @@ int main(int argc, char ** argv) { return 1; } - LOG_INF("All threads completed successfully.\n"); + LOG_INF("All threads finished without errors.\n"); return 0; }
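
For local verification, a minimal sketch of how the new test can be built and run through CTest. It assumes a CMake build directory named build, libcurl support enabled so the -hf/-hff model download works, and that the llama_build_and_test helper registers the test under the source file's base name (test-thread-safety), as the other entries in tests/CMakeLists.txt suggest:

    # configure with libcurl so the test can fetch the GGUF model from Hugging Face
    cmake -B build -DLLAMA_CURL=ON
    cmake --build build --target test-thread-safety -j

    # run only the thread safety test: one model per GPU, one CPU-only copy, and one
    # layer-split copy, each decoding in parallel across the --parallel contexts
    ctest --test-dir build -R test-thread-safety --verbose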