# Pinned llama.cpp revision to check out; empty means use the default branch HEAD.
LLAMA_VERSION?=

# Extra arguments forwarded to cmake; callers may pre-set these in the environment.
CMAKE_ARGS?=
# Acceleration backend selector: cublas, hipblas, sycl_f16, sycl_f32, or empty for CPU.
BUILD_TYPE?=
# Intel oneAPI environment script, sourced before SYCL builds (override if installed elsewhere).
ONEAPI_VARS?=/opt/intel/oneapi/setvars.sh

# If build type is cublas, then we set -DLLAMA_CUBLAS=ON to CMAKE_ARGS automatically
ifeq ($(BUILD_TYPE),cublas)
else ifeq ($(BUILD_TYPE),hipblas)
	CMAKE_ARGS+=-DLLAMA_HIPBLAS=ON
endif

# If build type is sycl_f16, build the SYCL backend with Intel's oneAPI
# compilers (icx/icpx) and FP16 support enabled.
ifeq ($(BUILD_TYPE),sycl_f16)
	CMAKE_ARGS+=-DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL_F16=ON
endif

# If build type is sycl_f32, build the SYCL backend with Intel's oneAPI
# compilers (icx/icpx), FP32 only.
ifeq ($(BUILD_TYPE),sycl_f32)
	CMAKE_ARGS+=-DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx
endif

# Fetch the llama.cpp sources (with submodules); the checkout of a pinned
# LLAMA_VERSION continues on the lines that follow this continuation.
llama.cpp:
	git clone --recurse-submodules https://github.com/ggerganov/llama.cpp llama.cpp
	if [ -z "$(LLAMA_VERSION)" ]; then \
# Stage the grpc-server example sources into the llama.cpp tree and register
# it with the examples build.
llama.cpp/examples/grpc-server:
	mkdir -p llama.cpp/examples/grpc-server
	cp -r $(abspath ./)/CMakeLists.txt llama.cpp/examples/grpc-server/
	cp -r $(abspath ./)/grpc-server.cpp llama.cpp/examples/grpc-server/
	cp -rfv $(abspath ./)/json.hpp llama.cpp/examples/grpc-server/
	cp -rfv $(abspath ./)/utils.hpp llama.cpp/examples/grpc-server/
	echo "add_subdirectory(grpc-server)" >> llama.cpp/examples/CMakeLists.txt
## XXX: In some versions of CMake clip wasn't being built before llama.
## This is a hack for now, but it should be fixed in the future.
# Remove build artifacts produced by this Makefile.
clean:
	rm -rf grpc-server

# Build llama.cpp (including the staged grpc-server example) and copy the
# resulting binary here. SYCL builds need the oneAPI toolchain environment,
# so for any sycl_* BUILD_TYPE the cmake invocation is wrapped in a bash
# shell that sources $(ONEAPI_VARS) first.
grpc-server: llama.cpp llama.cpp/examples/grpc-server
ifneq (,$(findstring sycl,$(BUILD_TYPE)))
	bash -c "source $(ONEAPI_VARS); \
	cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release"
else
	cd llama.cpp && mkdir -p build && cd build && cmake .. $(CMAKE_ARGS) && cmake --build . --config Release
endif
	cp llama.cpp/build/bin/grpc-server .
# (scrape artifact: "0 commit comments" is GitHub page footer text, not part of this Makefile)