
Commit f21e5d5

CUBLAS -> CUDA
1 parent 937dbbd commit f21e5d5

3 files changed (+9 −9)

CMakeLists.txt (+4 −4)

@@ -7,10 +7,10 @@ option(DEBUG "Debug mode" OFF)
 option(GGML_CUDA "cuda mode" OFF)
 
 if(DEBUG)
-set( CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g3 -O0" )
-set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g3 -O0" )
-set( CMAKE_LD_FLAGS "${CMAKE_LD_FLAGS} -g3 -O0" )
-set( CMAKE_LINKER_FLAGS "${CMAKE_LINKER_FLAGS} -g3 -O0" )
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g3 -O0")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g3 -O0")
+set(CMAKE_LD_FLAGS "${CMAKE_LD_FLAGS} -g3 -O0")
+set(CMAKE_LINKER_FLAGS "${CMAKE_LINKER_FLAGS} -g3 -O0")
 set(GGML_DEBUG ON CACHE BOOL "Debug mode")
 endif()
 
README.md (+1 −1)

@@ -23,7 +23,7 @@ This is tested with mac os arm
 ````
 mkdir build
 cd build
-cmake .. -DGGML_CUBLAS=ON
+cmake .. -DGGML_CUDA=ON
 make
 ````
 This is tested with Ubuntu 22.04 and cuda 12.0 and a 1070ti

main.cpp (+4 −4)

@@ -2,7 +2,7 @@
 #include "ggml/include/ggml-backend.h"
 #include "ggml/include/ggml.h"
 
-#ifdef GGML_USE_CUBLAS
+#ifdef GGML_USE_CUDA
 #include "ggml-cuda.h"
 #endif
 
@@ -612,7 +612,7 @@ bool autoregressive_model_load(const std::string &fname,
     }
 
     // initialize the backend
-#ifdef GGML_USE_CUBLAS
+#ifdef GGML_USE_CUDA
     fprintf(stderr, "%s: using CUDA backend\n", __func__);
     model.backend = ggml_backend_cuda_init(0);
     if (!model.backend) {
@@ -1074,7 +1074,7 @@ bool diffusion_model_load(const std::string &fname, diffusion_model &model) {
     }
 
     // initialize the backend
-#ifdef GGML_USE_CUBLAS
+#ifdef GGML_USE_CUDA
     fprintf(stderr, "%s: using CUDA backend\n", __func__);
     model.backend = ggml_backend_cuda_init(0);
     if (!model.backend) {
@@ -1609,7 +1609,7 @@ bool vocoder_model_load(const std::string &fname, vocoder_model &model) {
     }
 
    // initialize the backend
-#ifdef GGML_USE_CUBLAS
+#ifdef GGML_USE_CUDA
     fprintf(stderr, "%s: using CUDA backend\n", __func__);
     model.backend = ggml_backend_cuda_init(0);
     if (!model.backend) {
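All three main.cpp hunks truncate at `if (!model.backend) {`. For context, this is ggml's usual backend-selection idiom: try the CUDA backend when the guard is compiled in, otherwise fall back to CPU. A minimal sketch of the assumed full pattern follows; the `pick_backend` helper name and the CPU fallback via `ggml_backend_cpu_init` are illustrative assumptions based on ggml's examples, not lines from this commit.

````cpp
#include <cstdio>

#include "ggml/include/ggml-backend.h"

#ifdef GGML_USE_CUDA
#include "ggml-cuda.h"
#endif

// Sketch of the backend selection the hunks above guard. Only the lines up to
// `if (!backend) {` appear in the diff; the CPU fallback is an assumption.
static ggml_backend_t pick_backend(void) {
    ggml_backend_t backend = nullptr;

#ifdef GGML_USE_CUDA
    fprintf(stderr, "%s: using CUDA backend\n", __func__);
    backend = ggml_backend_cuda_init(0); // device 0
    if (!backend) {
        fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
    }
#endif

    if (!backend) {
        // assumed fallback: plain CPU backend
        backend = ggml_backend_cpu_init();
    }
    return backend;
}
````

Keeping the rename consistent matters because the guard is resolved at compile time: building with `-DGGML_CUDA=ON` while the source still tests `GGML_USE_CUBLAS` would silently compile the CPU-only path.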
