4 files changed, +20 −6 lines
File tree: examples/models/llama/runner

File 1 of 4:

@@ -47,10 +47,6 @@
 )
 list(APPEND _llama_runner__srcs
     ${CMAKE_CURRENT_SOURCE_DIR}/../tokenizer/llama_tiktoken.cpp
-)
-list(
-  APPEND _llama_runner__srcs
-  ${CMAKE_CURRENT_SOURCE_DIR}/../../../../extension/llm/tokenizer/hf_tokenizer.cpp
 )
 
 if(CMAKE_TOOLCHAIN_IOS
@@ -77,10 +73,19 @@ add_subdirectory(
   ${EXECUTORCH_ROOT}/extension/llm/tokenizers/third-party/re2
   ${CMAKE_CURRENT_BINARY_DIR}/re2
 )
+add_subdirectory(
+  ${EXECUTORCH_ROOT}/extension/llm/tokenizers/third-party/json
+  ${CMAKE_CURRENT_BINARY_DIR}/json
+)
+target_include_directories(llama_runner
+  PRIVATE ${CMAKE_INSTALL_PREFIX}/include
+          ${EXECUTORCH_ROOT}/extension/llm/tokenizers/third-party/llama.cpp-unicode/include
+          ${EXECUTORCH_ROOT}/extension/llm/tokenizers/third-party/llama.cpp-unicode/src
+)
 set(CMAKE_POSITION_INDEPENDENT_CODE ${_pic_flag})
 
 set(llama_runner_deps executorch extension_data_loader extension_module
-    extension_tensor re2::re2
+    extension_tensor re2::re2 nlohmann_json::nlohmann_json
 )
 
 target_link_libraries(llama_runner PUBLIC ${llama_runner_deps})
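
Why the new json dependency: the HF tokenizer path added by this change parses a Hugging Face tokenizer.json file, which is what nlohmann_json provides. The following is a minimal sketch, not code from this PR, of that kind of parsing; it assumes a conventional tokenizer.json layout with the vocabulary stored under "model" -> "vocab".

// Sketch only: reads an HF-style tokenizer.json and reports the vocab size.
// Assumes nlohmann/json (wired up above) and that the file keeps its
// vocabulary under "model" -> "vocab", which is typical but not guaranteed.
#include <fstream>
#include <iostream>
#include <string>

#include <nlohmann/json.hpp>

int main(int argc, char** argv) {
  if (argc < 2) {
    std::cerr << "usage: " << argv[0] << " <tokenizer.json>" << std::endl;
    return 1;
  }
  std::ifstream in(argv[1]);
  if (!in) {
    std::cerr << "could not open " << argv[1] << std::endl;
    return 1;
  }
  // nlohmann::json::parse throws on malformed JSON; acceptable for a sketch.
  const nlohmann::json doc = nlohmann::json::parse(in);
  const auto& vocab = doc.at("model").at("vocab");
  std::cout << "vocab entries: " << vocab.size() << std::endl;
  return 0;
}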

File 2 of 4:

@@ -80,8 +80,10 @@ Error Runner::load() {
   tokenizer_ = nullptr;
   // Check if tokenizer_path_ ends with ".json".
   if (tokenizer_path_.size() >= 5 &&
+
       tokenizer_path_.compare(tokenizer_path_.size() - 5, 5, ".json") == 0) {
     tokenizer_ = std::make_unique<tokenizers::HFTokenizer>();
+    ET_LOG(Info, "Loading json tokenizer");
     tokenizer_->load(tokenizer_path_);
     ET_LOG(
         Info, "Loaded tokenizer %s as HF tokenizer", tokenizer_path_.c_str());
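
Restating the dispatch this hunk implements: a tokenizer path ending in ".json" is loaded through tokenizers::HFTokenizer. Below is a minimal sketch of that suffix check; has_json_suffix is a hypothetical helper introduced only for illustration, and the fallback branch for non-JSON paths is not shown in this hunk.

// Sketch of the ".json" suffix dispatch from the hunk above; not the actual
// runner code. has_json_suffix() is a hypothetical helper for illustration.
#include <string>

static bool has_json_suffix(const std::string& path) {
  // Same check as the diff: the path is at least 5 characters long and its
  // last 5 characters compare equal to ".json".
  return path.size() >= 5 &&
      path.compare(path.size() - 5, 5, ".json") == 0;
}

// Usage, mirroring Runner::load() above (assuming the pytorch/tokenizers
// HFTokenizer type that the diff instantiates):
//
//   if (has_json_suffix(tokenizer_path_)) {
//     tokenizer_ = std::make_unique<tokenizers::HFTokenizer>();
//     tokenizer_->load(tokenizer_path_);
//   }
//   // A non-JSON path takes whichever loader the surrounding (unshown) code
//   // selects, e.g. the tiktoken/llama2c tokenizers listed as deps below.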

File 3 of 4:

@@ -49,7 +49,7 @@ def define_common_targets():
4949 "//executorch/runtime/core/exec_aten/util:tensor_util" + aten_suffix ,
5050 "//executorch/examples/models/llama/tokenizer:tiktoken" ,
5151 "//pytorch/tokenizers:llama2c_tokenizer" ,
52- "//pytorch/tokenizers:hf_tokenizer" ,
52+ "//pytorch/tokenizers:hf_tokenizer" ,
5353 ] + (_get_operator_lib (aten )) + ([
5454 # Vulkan API currently cannot build on some platforms (e.g. Apple, FBCODE)
5555 # Therefore enable it explicitly for now to avoid failing tests

File 4 of 4:

@@ -49,6 +49,13 @@ set(runner_deps executorch extension_data_loader extension_module
 
 target_link_libraries(extension_llm_runner PUBLIC ${runner_deps})
 
+target_include_directories(
+  extension_llm_runner
+  PUBLIC
+    ${EXECUTORCH_ROOT}/extension/llm/tokenizers/third-party/llama.cpp-unicode/include
+    ${EXECUTORCH_ROOT}/extension/llm/tokenizers/third-party/llama.cpp-unicode/src
+)
+
 target_include_directories(
   extension_llm_runner INTERFACE ${_common_include_directories}
   ${EXECUTORCH_ROOT}/extension/llm/tokenizers/include