From ed8fe692fbb0612f2b6eaf5c76756808c20a65fe Mon Sep 17 00:00:00 2001 From: Jingyu Date: Sun, 5 Jan 2025 23:51:43 +0800 Subject: [PATCH 1/9] WIP: A temporary call to rk3568's dma failed --- command/build_cross_rk356x_aarch64.sh | 106 ++++++++++++++++++ cpp/inspireface/CMakeLists.txt | 14 ++- .../nexus_processor/image_processor_rga.h | 4 +- cpp/sample/CMakeLists.txt | 12 ++ 4 files changed, 131 insertions(+), 5 deletions(-) create mode 100644 command/build_cross_rk356x_aarch64.sh diff --git a/command/build_cross_rk356x_aarch64.sh b/command/build_cross_rk356x_aarch64.sh new file mode 100644 index 00000000..17b6acbb --- /dev/null +++ b/command/build_cross_rk356x_aarch64.sh @@ -0,0 +1,106 @@ +#!/bin/bash + +# Reusable function to handle 'install' directory operations +move_install_files() { + local root_dir="$1" + local install_dir="$root_dir/install" + + # Step 1: Check if the 'install' directory exists + if [ ! -d "$install_dir" ]; then + echo "Error: 'install' directory does not exist in $root_dir" + exit 1 + fi + + # Step 2: Delete all other files/folders except 'install' + find "$root_dir" -mindepth 1 -maxdepth 1 -not -name "install" -exec rm -rf {} + + + # Step 3: Move all files from 'install' to the root directory + mv "$install_dir"/* "$root_dir" 2>/dev/null + + # Step 4: Remove the empty 'install' directory + rmdir "$install_dir" + + echo "Files from 'install' moved to $root_dir, and 'install' directory deleted." +} + +if [ -n "$VERSION" ]; then + TAG="-$VERSION" +else + TAG="" +fi + + +SCRIPT_DIR=$(pwd) # Project dir + +# # Create .rknpu2_cache directory if it doesn't exist +# CACHE_DIR="$(pwd)/.rknpu2_cache" +# mkdir -p "$CACHE_DIR" + +# # Check if MNN-2.3.0 directory already exists +# if [ ! -d "$CACHE_DIR/MNN-2.3.0" ]; then +# echo "Downloading MNN 2.3.0..." +# # Download MNN 2.3.0 +# if ! 
wget -P "$CACHE_DIR" https://github.com/alibaba/MNN/archive/refs/tags/2.3.0.zip; then +# echo "Error: Failed to download MNN 2.3.0" +# exit 1 +# fi + +# # Extract the zip file +# cd "$CACHE_DIR" +# if ! unzip 2.3.0.zip; then +# echo "Error: Failed to extract MNN 2.3.0" +# exit 1 +# fi + +# # Remove the zip file +# rm 2.3.0.zip + +# echo "MNN 2.3.0 downloaded and extracted" +# else +# echo "MNN-2.3.0 already exists in cache" +# fi + +# # Set absolute path to MNN source +# export MNN_CUSTOM_SOURCE="$CACHE_DIR/MNN-2.3.0" + +echo "MNN_CUSTOM_SOURCE: ${MNN_CUSTOM_SOURCE}" +cd ${SCRIPT_DIR} + +# export ARM_CROSS_COMPILE_TOOLCHAIN=/root/arm-rockchip830-linux-uclibcgnueabihf/ + +BUILD_FOLDER_PATH="build/inspireface-linux-aarch64-rk356x${TAG}" + +mkdir -p ${BUILD_FOLDER_PATH} +# shellcheck disable=SC2164 +cd ${BUILD_FOLDER_PATH} + +# export cross_compile_toolchain=/home/jingyuyan/software/arm-rockchip830-linux-uclibcgnueabihf + +cmake -DCMAKE_SYSTEM_NAME=Linux \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_SYSTEM_VERSION=1 \ + -DCMAKE_SYSTEM_PROCESSOR=aarch64 \ + -DCMAKE_C_COMPILER=$ARM_CROSS_COMPILE_TOOLCHAIN/bin/aarch64-linux-gnu-gcc \ + -DCMAKE_CXX_COMPILER=$ARM_CROSS_COMPILE_TOOLCHAIN/bin/aarch64-linux-gnu-g++ \ + -DCMAKE_CXX_FLAGS="${CMAKE_CXX_FLAGS} -flax-vector-conversions" \ + -DTARGET_PLATFORM=armlinux \ + -DISF_BUILD_LINUX_AARCH64=ON \ + -DISF_BUILD_LINUX_ARM7=OFF \ + -DMNN_SEP_BUILD=off \ + -DISF_ENABLE_RKNN=ON \ + -DISF_RK_DEVICE_TYPE=RK356X \ + -DISF_RKNPU_MAJOR=rknpu2 \ + -DISF_RK_COMPILER_TYPE=aarch64 \ + -DISF_ENABLE_RGA=ON \ + -DISF_ENABLE_COST_TIME=OFF \ + -DISF_BUILD_WITH_SAMPLE=ON \ + -DISF_BUILD_WITH_TEST=OFF \ + -DISF_ENABLE_BENCHMARK=OFF \ + -DISF_ENABLE_USE_LFW_DATA=OFF \ + -DISF_ENABLE_TEST_EVALUATION=OFF \ + -DISF_BUILD_SHARED_LIBS=OFF ${SCRIPT_DIR} + +make -j4 +# make install + +# move_install_files "$(pwd)" \ No newline at end of file diff --git a/cpp/inspireface/CMakeLists.txt b/cpp/inspireface/CMakeLists.txt index 4a645a95..39261bec 100644 --- 
a/cpp/inspireface/CMakeLists.txt +++ b/cpp/inspireface/CMakeLists.txt @@ -33,9 +33,14 @@ if (ISF_ENABLE_RKNN) set(RK_PLATFORM "Linux") endif() set(ISF_RKNN_API_INCLUDE_DIRS ${ISF_THIRD_PARTY_DIR}/inspireface-precompile-lite/rknn/${ISF_RKNPU_MAJOR}/runtime/${RK_PLATFORM}/librknn_api/include) - set(ISF_RKNN_API_LIB ${ISF_THIRD_PARTY_DIR}/inspireface-precompile-lite/rknn/${ISF_RKNPU_MAJOR}/runtime/${RK_PLATFORM}/librknn_api/${ISF_RK_COMPILER_TYPE}/librknnmrt.a) - # For rknpu2, we recommend linking static libraries by default - set(RKNN_LINKED ${ISF_RKNN_API_LIB}) + if (ISF_RK_COMPILER_TYPE STREQUAL "aarch64") + set(ISF_RKNN_API_LIB ${ISF_THIRD_PARTY_DIR}/inspireface-precompile-lite/rknn/${ISF_RKNPU_MAJOR}/runtime/${RK_PLATFORM}/librknn_api/${ISF_RK_COMPILER_TYPE}/) + link_directories(${ISF_RKNN_API_LIB}) + else() + # For rknpu2 with armv7, we recommend linking static libraries by default + set(ISF_RKNN_API_LIB ${ISF_THIRD_PARTY_DIR}/inspireface-precompile-lite/rknn/${ISF_RKNPU_MAJOR}/runtime/${RK_PLATFORM}/librknn_api/${ISF_RK_COMPILER_TYPE}/librknnmrt.a) + set(RKNN_LINKED ${ISF_RKNN_API_LIB}) + endif() endif() @@ -65,6 +70,9 @@ if (ISF_ENABLE_RKNN) elseif(ISF_RKNPU_MAJOR STREQUAL "rknpu2") add_definitions("-DINFERENCE_HELPER_ENABLE_RKNN2") endif() + if (ISF_RK_COMPILER_TYPE STREQUAL "aarch64") + set(LINK_THIRD_LIBS ${LINK_THIRD_LIBS} rknnrt) + endif() endif() # cpp yaml diff --git a/cpp/inspireface/middleware/nexus_processor/image_processor_rga.h b/cpp/inspireface/middleware/nexus_processor/image_processor_rga.h index 2f1577c2..87f48470 100644 --- a/cpp/inspireface/middleware/nexus_processor/image_processor_rga.h +++ b/cpp/inspireface/middleware/nexus_processor/image_processor_rga.h @@ -108,9 +108,9 @@ class RgaImageProcessor : public ImageProcessor { channels = c; buffer_size = width * height * channels; - int ret = dma_buf_alloc(RV1106_CMA_HEAP_PATH, buffer_size, &dma_fd, &virtual_addr); + int ret = dma_buf_alloc(DMA_HEAP_PATH, buffer_size, &dma_fd, &virtual_addr); 
if (ret < 0) { - INSPIRECV_LOG(ERROR) << "Failed to allocate DMA buffer"; + INSPIRECV_LOG(ERROR) << "Failed to allocate DMA buffer: " << ret; return false; } diff --git a/cpp/sample/CMakeLists.txt b/cpp/sample/CMakeLists.txt index 170785bb..8aa242c8 100644 --- a/cpp/sample/CMakeLists.txt +++ b/cpp/sample/CMakeLists.txt @@ -11,6 +11,18 @@ if (ISF_ENABLE_RKNN AND ISF_RKNPU_MAJOR STREQUAL "rknpu1") set(ext rknn_api dl) endif () +if (ISF_ENABLE_RKNN AND ISF_RKNPU_MAJOR STREQUAL "rknpu2" AND ISF_RK_COMPILER_TYPE STREQUAL "aarch64") + if(ANDROID) + set(RK_PLATFORM "Android") + else() + set(RK_PLATFORM "Linux") + endif() + set(ISF_RKNN_API_LIB ${ISF_THIRD_PARTY_DIR}/inspireface-precompile-lite/rknn/${ISF_RKNPU_MAJOR}/runtime/${RK_PLATFORM}/librknn_api/${ISF_RK_COMPILER_TYPE}/) + message("ISF_RKNN_API_LIB: ${ISF_RKNN_API_LIB}") + link_directories(${ISF_RKNN_API_LIB}) + set(ext rknnrt dl) +endif () + add_executable(Leak api/leak.cpp) target_link_libraries(Leak InspireFace ${ext}) set_target_properties(Leak PROPERTIES From bb240a35fbfbdd69adf71f20c5f3baf77056182e Mon Sep 17 00:00:00 2001 From: tunm Date: Mon, 6 Jan 2025 16:14:41 +0800 Subject: [PATCH 2/9] WIP: version + 1 --- CMakeLists.txt | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8abe58fb..f007fa88 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -10,7 +10,7 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3") # Current version set(INSPIRE_FACE_VERSION_MAJOR 1) set(INSPIRE_FACE_VERSION_MINOR 1) -set(INSPIRE_FACE_VERSION_PATCH 9) +set(INSPIRE_FACE_VERSION_PATCH 10) # Converts the version number to a string string(CONCAT INSPIRE_FACE_VERSION_MAJOR_STR ${INSPIRE_FACE_VERSION_MAJOR}) @@ -69,6 +69,12 @@ if (ISF_ENABLE_RKNN) endif() # Result message(STATUS "Use ${ISF_RKNPU_MAJOR}") + + if(ISF_RK_DEVICE_TYPE STREQUAL "RV1106") + set(ISF_RKNPU_RV1106 ON) + add_definitions("-DISF_RKNPU_RV1106") + endif() + endif () # Enable Rockchip RGA @@ -288,6 +294,9 @@ if 
(ISF_ENABLE_RKNN) message(STATUS "\t ISF_RKNPU_MAJOR: ${ISF_RKNPU_MAJOR}") message(STATUS "\t ISF_RK_DEVICE_TYPE: ${ISF_RK_DEVICE_TYPE}") message(STATUS "\t ISF_RK_COMPILER_TYPE: ${ISF_RK_COMPILER_TYPE}") + if(ISF_RKNPU_RV1106) + message(STATUS "\t ISF_RKNPU_RV1106: ${ISF_RKNPU_RV1106}") + endif() endif () message(STATUS "\t ISF_BUILD_LINUX_ARM7: ${ISF_BUILD_LINUX_ARM7}") message(STATUS "\t ISF_BUILD_LINUX_AARCH64: ${ISF_BUILD_LINUX_AARCH64}") From 6a7d62535b266d68ef1f14e97d7f51e36d2ce9d2 Mon Sep 17 00:00:00 2001 From: Jingyu Date: Tue, 7 Jan 2025 00:22:27 +0800 Subject: [PATCH 3/9] WIP: Must be use DMA_HEAP_DMA32_UNCACHE_PATCH for rk356x --- .../middleware/nexus_processor/image_processor_rga.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cpp/inspireface/middleware/nexus_processor/image_processor_rga.h b/cpp/inspireface/middleware/nexus_processor/image_processor_rga.h index 87f48470..9d44852e 100644 --- a/cpp/inspireface/middleware/nexus_processor/image_processor_rga.h +++ b/cpp/inspireface/middleware/nexus_processor/image_processor_rga.h @@ -108,7 +108,7 @@ class RgaImageProcessor : public ImageProcessor { channels = c; buffer_size = width * height * channels; - int ret = dma_buf_alloc(DMA_HEAP_PATH, buffer_size, &dma_fd, &virtual_addr); + int ret = dma_buf_alloc(DMA_HEAP_DMA32_UNCACHE_PATCH, buffer_size, &dma_fd, &virtual_addr); if (ret < 0) { INSPIRECV_LOG(ERROR) << "Failed to allocate DMA buffer: " << ret; return false; From 77d9a5ec825cc0830aa0f112f00eef843efe6c87 Mon Sep 17 00:00:00 2001 From: Jingyu Date: Tue, 7 Jan 2025 12:28:31 +0800 Subject: [PATCH 4/9] WIP: customize the dma heap --- .../initialization_module/launch.cpp | 8 +++++++ .../initialization_module/launch.h | 24 ++++++++++++++++++- .../nexus_processor/image_processor_rga.h | 3 ++- 3 files changed, 33 insertions(+), 2 deletions(-) diff --git a/cpp/inspireface/initialization_module/launch.cpp b/cpp/inspireface/initialization_module/launch.cpp index 9a744a35..9312a052 100644 
--- a/cpp/inspireface/initialization_module/launch.cpp +++ b/cpp/inspireface/initialization_module/launch.cpp @@ -99,4 +99,12 @@ void Launch::Unload() { } } +void Launch::SetRockchipDmaHeapPath(const std::string& path) { + m_rockchip_dma_heap_path_ = path; +} + +std::string Launch::GetRockchipDmaHeapPath() const { + return m_rockchip_dma_heap_path_; +} + } // namespace inspire \ No newline at end of file diff --git a/cpp/inspireface/initialization_module/launch.h b/cpp/inspireface/initialization_module/launch.h index d70b8ed9..e790f59e 100644 --- a/cpp/inspireface/initialization_module/launch.h +++ b/cpp/inspireface/initialization_module/launch.h @@ -6,6 +6,9 @@ #ifndef INSPIREFACE_LAUNCH_H #define INSPIREFACE_LAUNCH_H #include "middleware/model_archive/inspire_archive.h" +#if defined(ISF_ENABLE_RGA) +#include "middleware/nexus_processor/rga/dma_alloc.h" +#endif #include #ifndef INSPIRE_API @@ -43,8 +46,27 @@ class INSPIRE_API Launch { // Unloads the resources and resets the system to its initial state. void Unload(); + // Set the rockchip dma heap path + void SetRockchipDmaHeapPath(const std::string& path); + + // Get the rockchip dma heap path + std::string GetRockchipDmaHeapPath() const; + private: - Launch() : m_load_(false), m_archive_(nullptr) {} ///< Private constructor for the singleton pattern. + // Parameters + std::string m_rockchip_dma_heap_path_; + + // Constructor + Launch() : m_load_(false), m_archive_(nullptr) { +#if defined(ISF_ENABLE_RGA) +#if defined(ISF_RKNPU_RV1106) + m_rockchip_dma_heap_path_ = RV1106_CMA_HEAP_PATH; +#else + m_rockchip_dma_heap_path_ = DMA_HEAP_DMA32_UNCACHE_PATCH; +#endif + INSPIRE_LOGW("Rockchip dma heap configured path: %s", m_rockchip_dma_heap_path_.c_str()); +#endif + } ///< Private constructor for the singleton pattern. static std::mutex mutex_; ///< Mutex for synchronizing access to the singleton instance. static std::shared_ptr instance_; ///< The singleton instance of Launch. 
diff --git a/cpp/inspireface/middleware/nexus_processor/image_processor_rga.h b/cpp/inspireface/middleware/nexus_processor/image_processor_rga.h index 9d44852e..00e85b21 100644 --- a/cpp/inspireface/middleware/nexus_processor/image_processor_rga.h +++ b/cpp/inspireface/middleware/nexus_processor/image_processor_rga.h @@ -25,6 +25,7 @@ #include "RgaUtils.h" #include "rga/utils.h" #include "rga/dma_alloc.h" +#include "initialization_module/launch.h" namespace inspire { @@ -108,7 +109,7 @@ class RgaImageProcessor : public ImageProcessor { channels = c; buffer_size = width * height * channels; - int ret = dma_buf_alloc(DMA_HEAP_DMA32_UNCACHE_PATCH, buffer_size, &dma_fd, &virtual_addr); + int ret = dma_buf_alloc(INSPIRE_LAUNCH->GetRockchipDmaHeapPath().c_str(), buffer_size, &dma_fd, &virtual_addr); if (ret < 0) { INSPIRECV_LOG(ERROR) << "Failed to allocate DMA buffer: " << ret; return false; From 76c08b4f506ddf0f15108c9f9ae17d967f3cc9e9 Mon Sep 17 00:00:00 2001 From: tunm Date: Tue, 7 Jan 2025 18:07:10 +0800 Subject: [PATCH 5/9] WIP: Add RK356X model --- command/build_cross_rk356x_aarch64.sh | 2 +- command/download_models_general.sh | 11 ++++++++--- cpp/inspireface/c_api/inspireface.cc | 10 ++++++++++ cpp/inspireface/c_api/inspireface.h | 17 +++++++++++++++++ 4 files changed, 36 insertions(+), 4 deletions(-) diff --git a/command/build_cross_rk356x_aarch64.sh b/command/build_cross_rk356x_aarch64.sh index 17b6acbb..7992dfda 100644 --- a/command/build_cross_rk356x_aarch64.sh +++ b/command/build_cross_rk356x_aarch64.sh @@ -74,7 +74,7 @@ mkdir -p ${BUILD_FOLDER_PATH} # shellcheck disable=SC2164 cd ${BUILD_FOLDER_PATH} -# export cross_compile_toolchain=/home/jingyuyan/software/arm-rockchip830-linux-uclibcgnueabihf +# export ARM_CROSS_COMPILE_TOOLCHAIN=/host/software/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu cmake -DCMAKE_SYSTEM_NAME=Linux \ -DCMAKE_BUILD_TYPE=Release \ diff --git a/command/download_models_general.sh b/command/download_models_general.sh index 
127965e7..92dae5c4 100644 --- a/command/download_models_general.sh +++ b/command/download_models_general.sh @@ -8,6 +8,7 @@ URL1="https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Megatro URL2="https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Pikachu" URL3="https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Gundam_RV1109" URL4="https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Gundam_RV1106" +URL5="https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Gundam_RK356X" # Color codes YELLOW='\033[1;33m' @@ -43,27 +44,31 @@ if [ $# -eq 0 ]; then download_file "$URL2" download_file "$URL3" download_file "$URL4" + download_file "$URL5" # Check all files if [ -f "$DOWNLOAD_DIR/Megatron" ] && [ -f "$DOWNLOAD_DIR/Pikachu" ] && \ - [ -f "$DOWNLOAD_DIR/Gundam_RV1109" ] && [ -f "$DOWNLOAD_DIR/Gundam_RV1106" ]; then + [ -f "$DOWNLOAD_DIR/Gundam_RV1109" ] && [ -f "$DOWNLOAD_DIR/Gundam_RV1106" ] && \ + [ -f "$DOWNLOAD_DIR/Gundam_RK356X" ]; then echo "All downloads completed successfully!" print_file_path "Megatron" print_file_path "Pikachu" print_file_path "Gundam_RV1109" print_file_path "Gundam_RV1106" + print_file_path "Gundam_RK356X" else echo "Download failed!" exit 1 fi else case "$1" in - "Megatron"|"Pikachu"|"Gundam_RV1109"|"Gundam_RV1106") + "Megatron"|"Pikachu"|"Gundam_RV1109"|"Gundam_RV1106"|"Gundam_RK356X") echo "Downloading $1..." case "$1" in "Megatron") url="$URL1" ;; "Pikachu") url="$URL2" ;; "Gundam_RV1109") url="$URL3" ;; "Gundam_RV1106") url="$URL4" ;; + "Gundam_RK356X") url="$URL5" ;; esac download_file "$url" # Check file @@ -76,7 +81,7 @@ else fi ;; *) - echo "Invalid argument. Please use 'Megatron', 'Pikachu', 'Gundam_RV1109' or 'Gundam_RV1106'" + echo "Invalid argument. 
Please use 'Megatron', 'Pikachu', 'Gundam_RV1109', 'Gundam_RV1106' or 'Gundam_RK356X'" exit 1 ;; esac diff --git a/cpp/inspireface/c_api/inspireface.cc b/cpp/inspireface/c_api/inspireface.cc index ddf68185..db32cfd4 100644 --- a/cpp/inspireface/c_api/inspireface.cc +++ b/cpp/inspireface/c_api/inspireface.cc @@ -362,6 +362,16 @@ HResult HFFeatureHubDataDisable() { return FEATURE_HUB_DB->DisableHub(); } +HResult HFSetExpansiveHardwareRockchipDmaHeapPath(HPath path) { + INSPIRE_LAUNCH->SetRockchipDmaHeapPath(path); + return HSUCCEED; +} + +HResult HFQueryExpansiveHardwareRockchipDmaHeapPath(HString path) { + strcpy(path, INSPIRE_LAUNCH->GetRockchipDmaHeapPath().c_str()); + return HSUCCEED; +} + HResult HFFeatureHubDataEnable(HFFeatureHubConfiguration configuration) { inspire::DatabaseConfiguration param; if (configuration.primaryKeyMode != HF_PK_AUTO_INCREMENT && configuration.primaryKeyMode != HF_PK_MANUAL_INPUT) { diff --git a/cpp/inspireface/c_api/inspireface.h b/cpp/inspireface/c_api/inspireface.h index ce754222..c3bd9cf2 100644 --- a/cpp/inspireface/c_api/inspireface.h +++ b/cpp/inspireface/c_api/inspireface.h @@ -238,6 +238,23 @@ HYPER_CAPI_EXPORT extern HResult HFTerminateInspireFace(); * */ HYPER_CAPI_EXPORT extern HResult HFQueryInspireFaceLaunchStatus(HInt32 *status); +/** + * @brief Set the rockchip dma heap path + * By default, we have already configured the DMA Heap address used by RGA on RK devices. + * If you wish to customize this address, you can modify it through this API. + * @param path The path to the rockchip dma heap + * @return HResult indicating the success or failure of the operation. + * */ +HYPER_CAPI_EXPORT extern HResult HFSetExpansiveHardwareRockchipDmaHeapPath(HPath path); + +/** + * @brief Query the rockchip dma heap path + * @param path Pointer to a pre-allocated character array that will store the returned path. + * The array should be at least 256 bytes in size. + * @return HResult indicating the success or failure of the operation. 
+ * */ +HYPER_CAPI_EXPORT extern HResult HFQueryExpansiveHardwareRockchipDmaHeapPath(HString path); + /************************************************************************ * FaceSession ************************************************************************/ From 16b0b3943a456d3516533e6db121789de11aecdb Mon Sep 17 00:00:00 2001 From: Jingyu Date: Wed, 8 Jan 2025 00:06:48 +0800 Subject: [PATCH 6/9] WIP: The landmark model does not match the rk356x --- .../customized/rknn_adapter_general.h | 502 ++++++++++++++++++ .../track_module/face_detect/rnet_adapt.cpp | 2 +- cpp/sample/api/sample_face_track.cpp | 14 +- 3 files changed, 510 insertions(+), 8 deletions(-) create mode 100644 cpp/inspireface/middleware/inference_helper/customized/rknn_adapter_general.h diff --git a/cpp/inspireface/middleware/inference_helper/customized/rknn_adapter_general.h b/cpp/inspireface/middleware/inference_helper/customized/rknn_adapter_general.h new file mode 100644 index 00000000..98283662 --- /dev/null +++ b/cpp/inspireface/middleware/inference_helper/customized/rknn_adapter_general.h @@ -0,0 +1,502 @@ +// +// Created by Tunm-Air13 on 2022/10/10. 
+// + +#ifndef MAGIC_GESTURES_RKNN_ADAPTER_H +#define MAGIC_GESTURES_RKNN_ADAPTER_H + +#include +#include "rknn_api.h" +#include "data_type.h" +#include "log.h" + +/** + * @brief 获取RKNN数据类型字符串 + * @ingroup NeuralNetwork + * @param type 数据类型 + * @return 字符串编码类型 + */ +inline const char *get_type_string_(rknn_tensor_type type) { + switch (type) { + case RKNN_TENSOR_FLOAT32: + return "FP32"; + case RKNN_TENSOR_FLOAT16: + return "FP16"; + case RKNN_TENSOR_INT8: + return "INT8"; + case RKNN_TENSOR_UINT8: + return "UINT8"; + case RKNN_TENSOR_INT16: + return "INT16"; + default: + return "UNKNOW"; + } +} + +inline const char *get_qnt_type_string_(rknn_tensor_qnt_type type) { + switch (type) { + case RKNN_TENSOR_QNT_NONE: + return "NONE"; + case RKNN_TENSOR_QNT_DFP: + return "DFP"; + case RKNN_TENSOR_QNT_AFFINE_ASYMMETRIC: + return "AFFINE"; + default: + return "UNKNOW"; + } +} + +inline void print_tensor_attr_(const rknn_tensor_attr &attr) { + printf(" n_dims:%d \n", attr.n_dims); + printf(" [ "); + for (int i = 0; i < attr.n_dims; i++) { + printf(" %d ", attr.dims[i]); + } + printf("] \n"); + printf(" size:%d \n", attr.size); + printf(" n_elems:%d \n", attr.n_elems); + printf(" scale:%f \n", attr.scale); + printf(" name:%s \n", attr.name); +} + +inline unsigned char *load_data_(FILE *fp, size_t ofst, size_t sz) { + unsigned char *data; + int ret; + + data = NULL; + + if (NULL == fp) { + return NULL; + } + + ret = fseek(fp, ofst, SEEK_SET); + if (ret != 0) { + printf("blob seek failure.\n"); + return NULL; + } + + data = (unsigned char *)malloc(sz); + if (data == NULL) { + printf("buffer malloc failure.\n"); + return NULL; + } + ret = fread(data, 1, sz, fp); + return data; +} + +inline unsigned char *load_model_(const char *filename, int *model_size) { + FILE *fp; + unsigned char *data; + + fp = fopen(filename, "rb"); + if (NULL == fp) { + printf("Open file %s failed.\n", filename); + return NULL; + } + + fseek(fp, 0, SEEK_END); + int size = ftell(fp); + + data = 
load_data_(fp, 0, size); + + fclose(fp); + + *model_size = size; + return data; +} + +/** + * RKNN执行推理的状态 + * @ingroup NeuralNetwork + */ +enum Status { + SUCCESS = 0, ///< 执行成功 + ERROR_SHAPE_MATCH = 1, ///< 执行错误,tensor的形状不匹配 + ERROR_DATA_ORDER = 2 ///< 执行错误,tensor数据排序错误 +}; + +/** + * @brief RKNN神经网络推理适配器 + * @details 自定义化通用型的RKNN推理适配包装类, 可作与其他需要使用RKNN进行神经网络推理的任务进行组合使用 + * @ingroup NeuralNetwork + */ +class RKNNAdapterGeneral { +public: + // 禁止拷贝 + RKNNAdapterGeneral(const RKNNAdapterGeneral &) = delete; + RKNNAdapterGeneral &operator=(const RKNNAdapterGeneral &) = delete; + RKNNAdapterGeneral() = default; + + /** + * @brief 手动初始化 + * @details 初始化rknn模型,会申请一块内存进行创建推理器会话 + * @param model_path rknn模型的路径 + * @return 返回初始化结果 + */ + int Initialize(const char *model_path) { + /* Create the neural network */ + int model_data_size = 0; + model_data = load_model_(model_path, &model_data_size); + int ret = rknn_init(&rk_ctx_, model_data, model_data_size, 0, NULL); + // LOG_INFO("RKNN Init ok."); + if (ret < 0) { + INSPIRE_LOGE("rknn_init fail! ret=%d", ret); + return -1; + } + + return init_(); + } + + /** + * @brief 手动初始化 + * @details 初始化rknn模型,会申请一块内存进行创建推理器会话 + * @param model 传入SolexCV视觉库中的模型类型指针 + * @return 返回初始化结果 + */ + int Initialize(void *model_data, unsigned int model_size) { + /* Create the neural network */ + int model_data_size = 0; + // LOGD("Read models size: %f MB", round(float(model->modelsize.caffemodel_size) / (1024 * 1024))); + INSPIRE_LOGI("The neural network is being initialized..."); + int ret = rknn_init(&rk_ctx_, model_data, model_size, 0, NULL); + // LOG_INFO("RKNN Init ok."); + if (ret < 0) { + INSPIRE_LOGE("rknn_init fail! 
ret=%d", ret); + return -1; + } + + return init_(); + } + + int Initialize(const unsigned char *model_data, const unsigned int model_size) { + /* Create the neural network */ + INSPIRE_LOGI("The neural network is being initialized..."); + int ret = rknn_init(&rk_ctx_, (void *)model_data, model_size, 0, NULL); + + if (ret < 0) { + INSPIRE_LOGE("rknn_init fail! ret=%d", ret); + return -1; + } + + return init_(); + } + + /** + * @brief 获取输入图像Tensor的尺寸 + * @details 用于获取rknn模型的输入尺寸,提取预知输入大小方便进行输入的前处理操作 + * @param index 输入层的索引号 + * @return 返回由各个尺寸组成的维度信息 + */ + std::vector GetInputTensorSize(const int &index) { + std::vector dims(input_attrs_[index].dims, input_attrs_[index].dims + input_attrs_[index].n_dims); + return dims; + } + + /** + * @brief 获取s输出图像Tensor的尺寸 + * @details 用于获取rknn模型的输出尺寸 + * @param index 输出层的索引号 + * @return 返回由各个尺寸组成的维度信息 + */ + std::vector GetOutputTensorSize(const int &index) { + // std::cout << "output_attrs_[index].n_dims:" << output_attrs_[index].n_dims << std::endl; + std::vector dims(output_attrs_[index].dims, output_attrs_[index].dims + output_attrs_[index].n_dims); + return dims; + } + + /** + * @brief 获取输出Tensor的长度 + * @details 用于获取rknn模型的输出的长度信息 + * @param index 输出层的索引号 + * @return 长度信息 + */ + int GetOutputTensorLen(const int &index) { + std::vector tensor_size_out = GetOutputTensorSize(index); + int size = 1; + for (auto &one : tensor_size_out) + size *= one; + return size; + } + + /** + * @brief 设置输入层的数据流 + * @details 将图像输入喂入输入层中,是执行推理的前一步骤 + * @param index 输入层索引号 + * @param data 图像数据, 采用opencv的mat类型 + * @return 返回输入状态 + */ + Status SetInputData(const int index, uint8_t *data, rknn_tensor_type type = RKNN_TENSOR_UINT8, rknn_tensor_format format = RKNN_TENSOR_NHWC) { + if (index < input_tensors_.size()) { + input_tensors_[index].index = 0; + input_tensors_[index].type = RKNN_TENSOR_UINT8; + input_tensors_[index].size = input_attrs_[0].dims[1] * input_attrs_[0].dims[2] * input_attrs_[0].dims[3]; + input_tensors_[index].fmt = 
RKNN_TENSOR_NHWC; + input_tensors_[index].buf = data; + input_tensors_[index].pass_through = 0; + } else { + INSPIRE_LOGE("error: assert index < len"); + } + return SUCCESS; + } + + Status SetInputData(const int index, void *data, int width, int height, int channels, rknn_tensor_type type = RKNN_TENSOR_UINT8, + rknn_tensor_format format = RKNN_TENSOR_NHWC) { + if (index < input_tensors_.size()) { + input_tensors_[index].index = 0; + input_tensors_[index].type = type; + input_tensors_[index].size = width * height * channels; + input_tensors_[index].fmt = format; + input_tensors_[index].buf = data; + input_tensors_[index].pass_through = 0; + } else { + INSPIRE_LOGE("error: assert index < len"); + } + return SUCCESS; + } + + /** + * @brief 执行神经网络推理 + * @details 需要完成输入数据到输入层后才能执行该步骤, 该步骤为耗时操作 + * @return 返回推理状态结果 + */ + int RunSession() { + // LOGD("set input"); + int ret = rknn_inputs_set(rk_ctx_, rk_io_num_.n_input, input_tensors_.data()); + if (ret < 0) + INSPIRE_LOGE("rknn_input fail! ret=%d", ret); + + for (int i = 0; i < rk_io_num_.n_output; i++) { + output_tensors_[i].want_float = outputs_want_float_; + } + + // LOGD("rknn_run"); + ret = rknn_run(rk_ctx_, nullptr); + if (ret < 0) { + INSPIRE_LOGE("rknn_run fail! ret=%d", ret); + return -1; + } + + ret = rknn_outputs_get(rk_ctx_, rk_io_num_.n_output, output_tensors_.data(), NULL); + if (ret < 0) { + INSPIRE_LOGE("rknn_init fail! 
ret=%d", ret); + exit(0); + } + return ret; + } + + /** + * @brief 获取输出层的数据 + * @details 返回推理结束后输出层数据,需要先执行推理才能获取 + * @param index 输出层索引 + * @return 返回输出数据的指针 + */ + const float *GetOutputData(const int index) { + return (float *)(output_tensors_[index].buf); + } + + void *GetOutputFlow(const int index) { + return output_tensors_[index].buf; + } + + /** + * @brief 获取输出层的数据(UINT8) + * @details 返回推理结束后输出层UInt8格式的数据,需要先执行推理才能获取 + * @param index 输出层索引 + * @return 返回输出数据的指针 + */ + u_int8_t *GetOutputDataU8(const int index) { + return (uint8_t *)(output_tensors_[index].buf); + } + + /** + * @brief 可变长数据输入操作 + * @details 暂时还未支持该功能 + * @param index_name 输入层索引 + * @param shape 需要改变的维度 + */ + void ResizeInputTensor(const std::string &index_name, const std::vector &shape) { + // No implementation + } + + /** + * @brief 检测尺寸 + * @details 暂时未实现该功能 + * */ + void CheckSize() { + // No implementation + } + + /** + * @brief 获取输出层的数量 + * @details 获取输出层的数量, 通常在多任务多输出神经网络使用 + * @return 返回数量 + */ + size_t GetOutputsNum() const { + return rk_io_num_.n_output; + } + + /** + * @brief 返回输出层的所有Tensor + * @details 将输出层所有的Tensor进行获取 + * @return 返回所有Tensor + */ + std::vector &GetOutputTensors() { + return output_tensors_; + } + + /** + * @brief 返回输出层的所有Tensor节点信息 + * @details 节点信息包含输出尺寸、类型等其他信息 + * @return 返回信息 + */ + std::vector &GetOutputTensorAttr() { + return output_attrs_; + } + + /** + * @brief 析构函数 + */ + ~RKNNAdapterGeneral() { + Release(); + } + + /** + * @brief 释放资源 + * @details 释放掉所有内存中的资源,通常在析构函数下进行 + */ + void Release() { + rknn_destroy(rk_ctx_); + if (model_data) { + free(model_data); + } + } + + /** + * @brief 设置输出模式是否需要支持浮点格式 + * @details 根据编码风格选定,有些后处理会使用UInt8类型的格式进编解码 + * @param outputsWantFloat 0或1 + */ + void setOutputsWantFloat(int outputsWantFloat) { + outputs_want_float_ = outputsWantFloat; + } + + void ReleaseOutputs() { + rknn_outputs_release(rk_ctx_, rk_io_num_.n_output, output_tensors_.data()); + } + + std::vector &GetOutputAttrs() { + return output_attrs_; + } + + 
const float *GetOutputDataPtr(const int index) { + return (float *)(output_tensors_[index].buf); + } + +private: + /** + * 初始化 + * @return + */ + int init_() { + rknn_sdk_version version; + int ret = rknn_query(rk_ctx_, RKNN_QUERY_SDK_VERSION, &version, sizeof(rknn_sdk_version)); + if (ret < 0) { + INSPIRE_LOGE("rknn_init fail! ret=%d", ret); + return -1; + } + INSPIRE_LOGD("sdk version: %s driver version: %s", version.api_version, version.drv_version); + + ret = rknn_query(rk_ctx_, RKNN_QUERY_IN_OUT_NUM, &rk_io_num_, sizeof(rk_io_num_)); + + if (ret != RKNN_SUCC) { + INSPIRE_LOGE("rknn_query ctx fail! ret=%d", ret); + return -1; + } + + INSPIRE_LOGD("models input num: %d, output num: %d", rk_io_num_.n_input, rk_io_num_.n_output); + + // spdlog::trace("input tensors: "); + input_attrs_.resize(rk_io_num_.n_input); + output_attrs_.resize(rk_io_num_.n_output); + input_tensors_.resize(rk_io_num_.n_input); + output_tensors_.resize(rk_io_num_.n_output); + + for (int i = 0; i < rk_io_num_.n_input; ++i) { + memset(&input_attrs_[i], 0, sizeof(input_attrs_[i])); + memset(&input_tensors_[i], 0, sizeof(input_tensors_[i])); + input_attrs_[i].index = i; + ret = rknn_query(rk_ctx_, RKNN_QUERY_INPUT_ATTR, &(input_attrs_[i]), sizeof(rknn_tensor_attr)); + + INSPIRE_LOGD("input node index %d", i); + int channel = 3; + int width = 0; + int height = 0; + if (input_attrs_[i].fmt == RKNN_TENSOR_NCHW) { + INSPIRE_LOGD("models is NCHW input fmt"); + width = input_attrs_[i].dims[0]; + height = input_attrs_[i].dims[1]; + } else { + INSPIRE_LOGD("models is NHWC input fmt"); + width = input_attrs_[i].dims[1]; + height = input_attrs_[i].dims[2]; + } + INSPIRE_LOGD("models input height=%d, width=%d, channel=%d", height, width, channel); + // print_tensor_attr_(input_attrs_); + if (ret != RKNN_SUCC) { + INSPIRE_LOGE("rknn_query fail! 
ret=%d", ret); + return -1; + } + } + + // printf("[debug]models input num: %d, output num: %d\n", rk_io_num_.n_input, rk_io_num_.n_output); + for (int i = 0; i < rk_io_num_.n_output; ++i) { + memset(&output_attrs_[i], 0, sizeof(output_attrs_[i])); + memset(&output_tensors_[i], 0, sizeof(output_tensors_[i])); + output_attrs_[i].index = i; + ret = rknn_query(rk_ctx_, RKNN_QUERY_OUTPUT_ATTR, &(output_attrs_[i]), sizeof(rknn_tensor_attr)); + + if (output_attrs_[i].qnt_type != RKNN_TENSOR_QNT_AFFINE_ASYMMETRIC || output_attrs_[i].type != RKNN_TENSOR_UINT8) { + // LOGE("The Demo required for a Affine asymmetric u8 quantized rknn models, but output quant type is %s, output " + // "data type is %s", + // get_qnt_type_string_(output_attrs_[i].qnt_type), get_type_string_(output_attrs_[i].type)); + // return -1; + } + // print_tensor_attr_(output_attrs_[i]); + + // rknn_tensor_attr rknn_attr; + // memset(&rknn_attr, 0, sizeof(rknn_tensor_attr)); + // + // ret = rknn_query(rk_ctx_, RKNN_QUERY_OUTPUT_ATTR, &rknn_attr, + // sizeof(rknn_tensor_attr)); + // printf("output node index %d \n", i); + // print_tensor_attr_(rknn_attr); + + if (ret != RKNN_SUCC) { + INSPIRE_LOGE("rknn_query fail! 
ret=%d", ret); + return -1; + } + } + + return ret; + } + +private: + rknn_context rk_ctx_; // rknn的上下文管理器 + rknn_input_output_num rk_io_num_; // rkn的输入输出流数量 + + std::vector input_attrs_; // 输入属性 + std::vector output_attrs_; // 输出属性 + std::vector input_tensors_; // 输入数据 + std::vector output_tensors_; // 输出数据 + + int outputs_want_float_ = 0; // 支持浮点输出 + + std::vector tensor_shape_; // 输入形状 + int width_; // 输入宽 + int height_; // 输入高 + bool run_status_; // 执行状态 + + unsigned char *model_data; // 模型数据流 +}; + +#endif // MAGIC_GESTURES_RKNN_ADAPTER_H diff --git a/cpp/inspireface/track_module/face_detect/rnet_adapt.cpp b/cpp/inspireface/track_module/face_detect/rnet_adapt.cpp index 3fdf160c..eb3e0806 100644 --- a/cpp/inspireface/track_module/face_detect/rnet_adapt.cpp +++ b/cpp/inspireface/track_module/face_detect/rnet_adapt.cpp @@ -19,7 +19,7 @@ float RNetAdapt::operator()(const inspirecv::Image &bgr_affine) { m_processor_->MarkDone(); #ifdef INFERENCE_HELPER_ENABLE_RKNN2 auto sm = Softmax(outputs[0].second); - // std::cout << sm[0] << ", " << sm[1] << std ::endl; + std::cout << sm[0] << ", " << sm[1] << std ::endl; return sm[1]; #else return outputs[0].second[1]; diff --git a/cpp/sample/api/sample_face_track.cpp b/cpp/sample/api/sample_face_track.cpp index 997965f5..3cf03463 100644 --- a/cpp/sample/api/sample_face_track.cpp +++ b/cpp/sample/api/sample_face_track.cpp @@ -61,7 +61,7 @@ int main(int argc, char* argv[]) { HOption option = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS | HF_ENABLE_DETECT_MODE_LANDMARK; // Non-video or frame sequence mode uses IMAGE-MODE, which is always face detection without // tracking - HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT; + HFDetectMode detMode = HF_DETECT_MODE_LIGHT_TRACK; // Maximum number of faces detected HInt32 maxDetectNum = 20; // Face detection image input level @@ -94,10 +94,12 @@ int main(int argc, char* argv[]) { // Execute HF_FaceContextRunFaceTrack captures face information in an image 
HFMultipleFaceData multipleFaceData = {0}; - ret = HFExecuteFaceTrack(session, imageHandle, &multipleFaceData); - if (ret != HSUCCEED) { - std::cout << "Execute HFExecuteFaceTrack error: " << ret << std::endl; - return ret; + for (int i = 0; i < 3; i++) { + ret = HFExecuteFaceTrack(session, imageHandle, &multipleFaceData); + if (ret != HSUCCEED) { + std::cout << "Execute HFExecuteFaceTrack error: " << ret << std::endl; + return ret; + } } // Print the number of faces detected @@ -155,9 +157,7 @@ int main(int argc, char* argv[]) { // when FaceContext is created! auto pipelineOption = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS; // In this loop, all faces are processed - std::cout << "HFMultipleFacePipelineProcessOptional" << std::endl; ret = HFMultipleFacePipelineProcessOptional(session, imageHandle, &multipleFaceData, pipelineOption); - std::cout << "HFMultipleFacePipelineProcessOptional success" << std::endl; if (ret != HSUCCEED) { std::cout << "Execute Pipeline error: " << ret << std::endl; return ret; From 89b1692a234a8af3c7da6a10fd1f6ecd8d3cd081 Mon Sep 17 00:00:00 2001 From: Jingyu Date: Wed, 8 Jan 2025 22:47:59 +0800 Subject: [PATCH 7/9] WIP: Complete rk356x --- .../workflows/linux-armv7-rk356x-aarch64.yaml | 49 ++ command/build_cross_rk356x_aarch64.sh | 36 +- cpp/inspireface/c_api/intypedef.h | 1 + .../customized/rknn_adapter_general.h | 502 ------------------ .../attribute/mask_predict_adapt.cpp | 2 +- .../track_module/face_detect/rnet_adapt.cpp | 1 - cpp/sample/api/sample_face_track.cpp | 12 +- doc/Error-Feedback-Codes.md | 63 +-- tools/error_table.md | 75 +-- 9 files changed, 130 insertions(+), 611 deletions(-) create mode 100644 .github/workflows/linux-armv7-rk356x-aarch64.yaml delete mode 100644 cpp/inspireface/middleware/inference_helper/customized/rknn_adapter_general.h diff --git a/.github/workflows/linux-armv7-rk356x-aarch64.yaml b/.github/workflows/linux-armv7-rk356x-aarch64.yaml new file mode 100644 index 00000000..788a1de1 --- 
/dev/null +++ b/.github/workflows/linux-armv7-rk356x-aarch64.yaml @@ -0,0 +1,49 @@ +name: Build Linux-Ubuntu-AArch64-rk356x + +on: + push: + branches: ["dev/rk356x"] + +# Set the global GitHub token environment variable +env: + GITHUB_TOKEN: ${{ secrets.MY_GITHUB_TOKEN }} + +jobs: + build: + name: Compile Linux-AArch64 RK356X + runs-on: ubuntu-latest + + steps: + # Step 1: Checkout the repository + - name: Checkout Repository + uses: actions/checkout@v4 + + # Step 2: Synchronize and update submodules recursively + - name: Update submodules + run: | + git clone --recurse-submodules https://github.com/tunmx/inspireface-3rdparty.git 3rdparty + + # Step 3: Install wget and xz-utils + - name: Install wget and xz-utils + run: | + sudo apt-get update + sudo apt-get install -y wget xz-utils + + # Step 4: Download Toolchain + - name: Download and Extract Linaro Toolchain + run: | + mkdir -p /opt/linaro-toolchain + wget -qO- https://releases.linaro.org/components/toolchain/binaries/6.3-2017.05/aarch64-linux-gnu/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu.tar.xz | tar -xJ -C /opt/linaro-toolchain --strip-components=1 + + # Step 5: Install Toolchain + - name: Set environment variables + run: | + echo "ARM_CROSS_COMPILE_TOOLCHAIN=/opt/linaro-toolchain" >> $GITHUB_ENV + echo "PATH=/opt/linaro-toolchain/bin:${PATH}" >> $GITHUB_ENV + + # Step 6: Start building the SDK + - name: Start Building the InspireFace-Linux-AArch64-rk356x + run: | + bash command/build_cross_rk356x_aarch64.sh + ls build + diff --git a/command/build_cross_rk356x_aarch64.sh b/command/build_cross_rk356x_aarch64.sh index 7992dfda..81d57420 100644 --- a/command/build_cross_rk356x_aarch64.sh +++ b/command/build_cross_rk356x_aarch64.sh @@ -32,36 +32,6 @@ fi SCRIPT_DIR=$(pwd) # Project dir -# # Create .rknpu2_cache directory if it doesn't exist -# CACHE_DIR="$(pwd)/.rknpu2_cache" -# mkdir -p "$CACHE_DIR" - -# # Check if MNN-2.3.0 directory already exists -# if [ ! 
-d "$CACHE_DIR/MNN-2.3.0" ]; then -# echo "Downloading MNN 2.3.0..." -# # Download MNN 2.3.0 -# if ! wget -P "$CACHE_DIR" https://github.com/alibaba/MNN/archive/refs/tags/2.3.0.zip; then -# echo "Error: Failed to download MNN 2.3.0" -# exit 1 -# fi - -# # Extract the zip file -# cd "$CACHE_DIR" -# if ! unzip 2.3.0.zip; then -# echo "Error: Failed to extract MNN 2.3.0" -# exit 1 -# fi - -# # Remove the zip file -# rm 2.3.0.zip - -# echo "MNN 2.3.0 downloaded and extracted" -# else -# echo "MNN-2.3.0 already exists in cache" -# fi - -# # Set absolute path to MNN source -# export MNN_CUSTOM_SOURCE="$CACHE_DIR/MNN-2.3.0" echo "MNN_CUSTOM_SOURCE: ${MNN_CUSTOM_SOURCE}" cd ${SCRIPT_DIR} @@ -93,7 +63,7 @@ cmake -DCMAKE_SYSTEM_NAME=Linux \ -DISF_RK_COMPILER_TYPE=aarch64 \ -DISF_ENABLE_RGA=ON \ -DISF_ENABLE_COST_TIME=OFF \ - -DISF_BUILD_WITH_SAMPLE=ON \ + -DISF_BUILD_WITH_SAMPLE=OFF \ -DISF_BUILD_WITH_TEST=OFF \ -DISF_ENABLE_BENCHMARK=OFF \ -DISF_ENABLE_USE_LFW_DATA=OFF \ @@ -101,6 +71,6 @@ cmake -DCMAKE_SYSTEM_NAME=Linux \ -DISF_BUILD_SHARED_LIBS=OFF ${SCRIPT_DIR} make -j4 -# make install +make install -# move_install_files "$(pwd)" \ No newline at end of file +move_install_files "$(pwd)" \ No newline at end of file diff --git a/cpp/inspireface/c_api/intypedef.h b/cpp/inspireface/c_api/intypedef.h index cf39bdda..fce02d5d 100644 --- a/cpp/inspireface/c_api/intypedef.h +++ b/cpp/inspireface/c_api/intypedef.h @@ -5,6 +5,7 @@ #ifndef INSPIREFACE_INTYPEDEF_H #define INSPIREFACE_INTYPEDEF_H +#include // clang-format off typedef void* HPVoid; ///< Pointer to Void. diff --git a/cpp/inspireface/middleware/inference_helper/customized/rknn_adapter_general.h b/cpp/inspireface/middleware/inference_helper/customized/rknn_adapter_general.h deleted file mode 100644 index 98283662..00000000 --- a/cpp/inspireface/middleware/inference_helper/customized/rknn_adapter_general.h +++ /dev/null @@ -1,502 +0,0 @@ -// -// Created by Tunm-Air13 on 2022/10/10. 
-// - -#ifndef MAGIC_GESTURES_RKNN_ADAPTER_H -#define MAGIC_GESTURES_RKNN_ADAPTER_H - -#include -#include "rknn_api.h" -#include "data_type.h" -#include "log.h" - -/** - * @brief 获取RKNN数据类型字符串 - * @ingroup NeuralNetwork - * @param type 数据类型 - * @return 字符串编码类型 - */ -inline const char *get_type_string_(rknn_tensor_type type) { - switch (type) { - case RKNN_TENSOR_FLOAT32: - return "FP32"; - case RKNN_TENSOR_FLOAT16: - return "FP16"; - case RKNN_TENSOR_INT8: - return "INT8"; - case RKNN_TENSOR_UINT8: - return "UINT8"; - case RKNN_TENSOR_INT16: - return "INT16"; - default: - return "UNKNOW"; - } -} - -inline const char *get_qnt_type_string_(rknn_tensor_qnt_type type) { - switch (type) { - case RKNN_TENSOR_QNT_NONE: - return "NONE"; - case RKNN_TENSOR_QNT_DFP: - return "DFP"; - case RKNN_TENSOR_QNT_AFFINE_ASYMMETRIC: - return "AFFINE"; - default: - return "UNKNOW"; - } -} - -inline void print_tensor_attr_(const rknn_tensor_attr &attr) { - printf(" n_dims:%d \n", attr.n_dims); - printf(" [ "); - for (int i = 0; i < attr.n_dims; i++) { - printf(" %d ", attr.dims[i]); - } - printf("] \n"); - printf(" size:%d \n", attr.size); - printf(" n_elems:%d \n", attr.n_elems); - printf(" scale:%f \n", attr.scale); - printf(" name:%s \n", attr.name); -} - -inline unsigned char *load_data_(FILE *fp, size_t ofst, size_t sz) { - unsigned char *data; - int ret; - - data = NULL; - - if (NULL == fp) { - return NULL; - } - - ret = fseek(fp, ofst, SEEK_SET); - if (ret != 0) { - printf("blob seek failure.\n"); - return NULL; - } - - data = (unsigned char *)malloc(sz); - if (data == NULL) { - printf("buffer malloc failure.\n"); - return NULL; - } - ret = fread(data, 1, sz, fp); - return data; -} - -inline unsigned char *load_model_(const char *filename, int *model_size) { - FILE *fp; - unsigned char *data; - - fp = fopen(filename, "rb"); - if (NULL == fp) { - printf("Open file %s failed.\n", filename); - return NULL; - } - - fseek(fp, 0, SEEK_END); - int size = ftell(fp); - - data = 
load_data_(fp, 0, size); - - fclose(fp); - - *model_size = size; - return data; -} - -/** - * RKNN执行推理的状态 - * @ingroup NeuralNetwork - */ -enum Status { - SUCCESS = 0, ///< 执行成功 - ERROR_SHAPE_MATCH = 1, ///< 执行错误,tensor的形状不匹配 - ERROR_DATA_ORDER = 2 ///< 执行错误,tensor数据排序错误 -}; - -/** - * @brief RKNN神经网络推理适配器 - * @details 自定义化通用型的RKNN推理适配包装类, 可作与其他需要使用RKNN进行神经网络推理的任务进行组合使用 - * @ingroup NeuralNetwork - */ -class RKNNAdapterGeneral { -public: - // 禁止拷贝 - RKNNAdapterGeneral(const RKNNAdapterGeneral &) = delete; - RKNNAdapterGeneral &operator=(const RKNNAdapterGeneral &) = delete; - RKNNAdapterGeneral() = default; - - /** - * @brief 手动初始化 - * @details 初始化rknn模型,会申请一块内存进行创建推理器会话 - * @param model_path rknn模型的路径 - * @return 返回初始化结果 - */ - int Initialize(const char *model_path) { - /* Create the neural network */ - int model_data_size = 0; - model_data = load_model_(model_path, &model_data_size); - int ret = rknn_init(&rk_ctx_, model_data, model_data_size, 0, NULL); - // LOG_INFO("RKNN Init ok."); - if (ret < 0) { - INSPIRE_LOGE("rknn_init fail! ret=%d", ret); - return -1; - } - - return init_(); - } - - /** - * @brief 手动初始化 - * @details 初始化rknn模型,会申请一块内存进行创建推理器会话 - * @param model 传入SolexCV视觉库中的模型类型指针 - * @return 返回初始化结果 - */ - int Initialize(void *model_data, unsigned int model_size) { - /* Create the neural network */ - int model_data_size = 0; - // LOGD("Read models size: %f MB", round(float(model->modelsize.caffemodel_size) / (1024 * 1024))); - INSPIRE_LOGI("The neural network is being initialized..."); - int ret = rknn_init(&rk_ctx_, model_data, model_size, 0, NULL); - // LOG_INFO("RKNN Init ok."); - if (ret < 0) { - INSPIRE_LOGE("rknn_init fail! 
ret=%d", ret); - return -1; - } - - return init_(); - } - - int Initialize(const unsigned char *model_data, const unsigned int model_size) { - /* Create the neural network */ - INSPIRE_LOGI("The neural network is being initialized..."); - int ret = rknn_init(&rk_ctx_, (void *)model_data, model_size, 0, NULL); - - if (ret < 0) { - INSPIRE_LOGE("rknn_init fail! ret=%d", ret); - return -1; - } - - return init_(); - } - - /** - * @brief 获取输入图像Tensor的尺寸 - * @details 用于获取rknn模型的输入尺寸,提取预知输入大小方便进行输入的前处理操作 - * @param index 输入层的索引号 - * @return 返回由各个尺寸组成的维度信息 - */ - std::vector GetInputTensorSize(const int &index) { - std::vector dims(input_attrs_[index].dims, input_attrs_[index].dims + input_attrs_[index].n_dims); - return dims; - } - - /** - * @brief 获取s输出图像Tensor的尺寸 - * @details 用于获取rknn模型的输出尺寸 - * @param index 输出层的索引号 - * @return 返回由各个尺寸组成的维度信息 - */ - std::vector GetOutputTensorSize(const int &index) { - // std::cout << "output_attrs_[index].n_dims:" << output_attrs_[index].n_dims << std::endl; - std::vector dims(output_attrs_[index].dims, output_attrs_[index].dims + output_attrs_[index].n_dims); - return dims; - } - - /** - * @brief 获取输出Tensor的长度 - * @details 用于获取rknn模型的输出的长度信息 - * @param index 输出层的索引号 - * @return 长度信息 - */ - int GetOutputTensorLen(const int &index) { - std::vector tensor_size_out = GetOutputTensorSize(index); - int size = 1; - for (auto &one : tensor_size_out) - size *= one; - return size; - } - - /** - * @brief 设置输入层的数据流 - * @details 将图像输入喂入输入层中,是执行推理的前一步骤 - * @param index 输入层索引号 - * @param data 图像数据, 采用opencv的mat类型 - * @return 返回输入状态 - */ - Status SetInputData(const int index, uint8_t *data, rknn_tensor_type type = RKNN_TENSOR_UINT8, rknn_tensor_format format = RKNN_TENSOR_NHWC) { - if (index < input_tensors_.size()) { - input_tensors_[index].index = 0; - input_tensors_[index].type = RKNN_TENSOR_UINT8; - input_tensors_[index].size = input_attrs_[0].dims[1] * input_attrs_[0].dims[2] * input_attrs_[0].dims[3]; - input_tensors_[index].fmt = 
RKNN_TENSOR_NHWC; - input_tensors_[index].buf = data; - input_tensors_[index].pass_through = 0; - } else { - INSPIRE_LOGE("error: assert index < len"); - } - return SUCCESS; - } - - Status SetInputData(const int index, void *data, int width, int height, int channels, rknn_tensor_type type = RKNN_TENSOR_UINT8, - rknn_tensor_format format = RKNN_TENSOR_NHWC) { - if (index < input_tensors_.size()) { - input_tensors_[index].index = 0; - input_tensors_[index].type = type; - input_tensors_[index].size = width * height * channels; - input_tensors_[index].fmt = format; - input_tensors_[index].buf = data; - input_tensors_[index].pass_through = 0; - } else { - INSPIRE_LOGE("error: assert index < len"); - } - return SUCCESS; - } - - /** - * @brief 执行神经网络推理 - * @details 需要完成输入数据到输入层后才能执行该步骤, 该步骤为耗时操作 - * @return 返回推理状态结果 - */ - int RunSession() { - // LOGD("set input"); - int ret = rknn_inputs_set(rk_ctx_, rk_io_num_.n_input, input_tensors_.data()); - if (ret < 0) - INSPIRE_LOGE("rknn_input fail! ret=%d", ret); - - for (int i = 0; i < rk_io_num_.n_output; i++) { - output_tensors_[i].want_float = outputs_want_float_; - } - - // LOGD("rknn_run"); - ret = rknn_run(rk_ctx_, nullptr); - if (ret < 0) { - INSPIRE_LOGE("rknn_run fail! ret=%d", ret); - return -1; - } - - ret = rknn_outputs_get(rk_ctx_, rk_io_num_.n_output, output_tensors_.data(), NULL); - if (ret < 0) { - INSPIRE_LOGE("rknn_init fail! 
ret=%d", ret); - exit(0); - } - return ret; - } - - /** - * @brief 获取输出层的数据 - * @details 返回推理结束后输出层数据,需要先执行推理才能获取 - * @param index 输出层索引 - * @return 返回输出数据的指针 - */ - const float *GetOutputData(const int index) { - return (float *)(output_tensors_[index].buf); - } - - void *GetOutputFlow(const int index) { - return output_tensors_[index].buf; - } - - /** - * @brief 获取输出层的数据(UINT8) - * @details 返回推理结束后输出层UInt8格式的数据,需要先执行推理才能获取 - * @param index 输出层索引 - * @return 返回输出数据的指针 - */ - u_int8_t *GetOutputDataU8(const int index) { - return (uint8_t *)(output_tensors_[index].buf); - } - - /** - * @brief 可变长数据输入操作 - * @details 暂时还未支持该功能 - * @param index_name 输入层索引 - * @param shape 需要改变的维度 - */ - void ResizeInputTensor(const std::string &index_name, const std::vector &shape) { - // No implementation - } - - /** - * @brief 检测尺寸 - * @details 暂时未实现该功能 - * */ - void CheckSize() { - // No implementation - } - - /** - * @brief 获取输出层的数量 - * @details 获取输出层的数量, 通常在多任务多输出神经网络使用 - * @return 返回数量 - */ - size_t GetOutputsNum() const { - return rk_io_num_.n_output; - } - - /** - * @brief 返回输出层的所有Tensor - * @details 将输出层所有的Tensor进行获取 - * @return 返回所有Tensor - */ - std::vector &GetOutputTensors() { - return output_tensors_; - } - - /** - * @brief 返回输出层的所有Tensor节点信息 - * @details 节点信息包含输出尺寸、类型等其他信息 - * @return 返回信息 - */ - std::vector &GetOutputTensorAttr() { - return output_attrs_; - } - - /** - * @brief 析构函数 - */ - ~RKNNAdapterGeneral() { - Release(); - } - - /** - * @brief 释放资源 - * @details 释放掉所有内存中的资源,通常在析构函数下进行 - */ - void Release() { - rknn_destroy(rk_ctx_); - if (model_data) { - free(model_data); - } - } - - /** - * @brief 设置输出模式是否需要支持浮点格式 - * @details 根据编码风格选定,有些后处理会使用UInt8类型的格式进编解码 - * @param outputsWantFloat 0或1 - */ - void setOutputsWantFloat(int outputsWantFloat) { - outputs_want_float_ = outputsWantFloat; - } - - void ReleaseOutputs() { - rknn_outputs_release(rk_ctx_, rk_io_num_.n_output, output_tensors_.data()); - } - - std::vector &GetOutputAttrs() { - return output_attrs_; - } - - 
const float *GetOutputDataPtr(const int index) { - return (float *)(output_tensors_[index].buf); - } - -private: - /** - * 初始化 - * @return - */ - int init_() { - rknn_sdk_version version; - int ret = rknn_query(rk_ctx_, RKNN_QUERY_SDK_VERSION, &version, sizeof(rknn_sdk_version)); - if (ret < 0) { - INSPIRE_LOGE("rknn_init fail! ret=%d", ret); - return -1; - } - INSPIRE_LOGD("sdk version: %s driver version: %s", version.api_version, version.drv_version); - - ret = rknn_query(rk_ctx_, RKNN_QUERY_IN_OUT_NUM, &rk_io_num_, sizeof(rk_io_num_)); - - if (ret != RKNN_SUCC) { - INSPIRE_LOGE("rknn_query ctx fail! ret=%d", ret); - return -1; - } - - INSPIRE_LOGD("models input num: %d, output num: %d", rk_io_num_.n_input, rk_io_num_.n_output); - - // spdlog::trace("input tensors: "); - input_attrs_.resize(rk_io_num_.n_input); - output_attrs_.resize(rk_io_num_.n_output); - input_tensors_.resize(rk_io_num_.n_input); - output_tensors_.resize(rk_io_num_.n_output); - - for (int i = 0; i < rk_io_num_.n_input; ++i) { - memset(&input_attrs_[i], 0, sizeof(input_attrs_[i])); - memset(&input_tensors_[i], 0, sizeof(input_tensors_[i])); - input_attrs_[i].index = i; - ret = rknn_query(rk_ctx_, RKNN_QUERY_INPUT_ATTR, &(input_attrs_[i]), sizeof(rknn_tensor_attr)); - - INSPIRE_LOGD("input node index %d", i); - int channel = 3; - int width = 0; - int height = 0; - if (input_attrs_[i].fmt == RKNN_TENSOR_NCHW) { - INSPIRE_LOGD("models is NCHW input fmt"); - width = input_attrs_[i].dims[0]; - height = input_attrs_[i].dims[1]; - } else { - INSPIRE_LOGD("models is NHWC input fmt"); - width = input_attrs_[i].dims[1]; - height = input_attrs_[i].dims[2]; - } - INSPIRE_LOGD("models input height=%d, width=%d, channel=%d", height, width, channel); - // print_tensor_attr_(input_attrs_); - if (ret != RKNN_SUCC) { - INSPIRE_LOGE("rknn_query fail! 
ret=%d", ret); - return -1; - } - } - - // printf("[debug]models input num: %d, output num: %d\n", rk_io_num_.n_input, rk_io_num_.n_output); - for (int i = 0; i < rk_io_num_.n_output; ++i) { - memset(&output_attrs_[i], 0, sizeof(output_attrs_[i])); - memset(&output_tensors_[i], 0, sizeof(output_tensors_[i])); - output_attrs_[i].index = i; - ret = rknn_query(rk_ctx_, RKNN_QUERY_OUTPUT_ATTR, &(output_attrs_[i]), sizeof(rknn_tensor_attr)); - - if (output_attrs_[i].qnt_type != RKNN_TENSOR_QNT_AFFINE_ASYMMETRIC || output_attrs_[i].type != RKNN_TENSOR_UINT8) { - // LOGE("The Demo required for a Affine asymmetric u8 quantized rknn models, but output quant type is %s, output " - // "data type is %s", - // get_qnt_type_string_(output_attrs_[i].qnt_type), get_type_string_(output_attrs_[i].type)); - // return -1; - } - // print_tensor_attr_(output_attrs_[i]); - - // rknn_tensor_attr rknn_attr; - // memset(&rknn_attr, 0, sizeof(rknn_tensor_attr)); - // - // ret = rknn_query(rk_ctx_, RKNN_QUERY_OUTPUT_ATTR, &rknn_attr, - // sizeof(rknn_tensor_attr)); - // printf("output node index %d \n", i); - // print_tensor_attr_(rknn_attr); - - if (ret != RKNN_SUCC) { - INSPIRE_LOGE("rknn_query fail! 
ret=%d", ret); - return -1; - } - } - - return ret; - } - -private: - rknn_context rk_ctx_; // rknn的上下文管理器 - rknn_input_output_num rk_io_num_; // rkn的输入输出流数量 - - std::vector input_attrs_; // 输入属性 - std::vector output_attrs_; // 输出属性 - std::vector input_tensors_; // 输入数据 - std::vector output_tensors_; // 输出数据 - - int outputs_want_float_ = 0; // 支持浮点输出 - - std::vector tensor_shape_; // 输入形状 - int width_; // 输入宽 - int height_; // 输入高 - bool run_status_; // 执行状态 - - unsigned char *model_data; // 模型数据流 -}; - -#endif // MAGIC_GESTURES_RKNN_ADAPTER_H diff --git a/cpp/inspireface/pipeline_module/attribute/mask_predict_adapt.cpp b/cpp/inspireface/pipeline_module/attribute/mask_predict_adapt.cpp index 10e5bb43..9bfe84f7 100644 --- a/cpp/inspireface/pipeline_module/attribute/mask_predict_adapt.cpp +++ b/cpp/inspireface/pipeline_module/attribute/mask_predict_adapt.cpp @@ -13,13 +13,13 @@ float MaskPredictAdapt::operator()(const inspirecv::Image& bgr_affine) { AnyTensorOutputs outputs; if (bgr_affine.Height() == m_input_size_ && bgr_affine.Width() == m_input_size_) { Forward(bgr_affine, outputs); + } else { // auto resized = bgr_affine.Resize(m_input_size_, m_input_size_); uint8_t* resized_data = nullptr; m_processor_->Resize(bgr_affine.Data(), bgr_affine.Width(), bgr_affine.Height(), bgr_affine.Channels(), &resized_data, m_input_size_, m_input_size_); auto resized = inspirecv::Image::Create(m_input_size_, m_input_size_, bgr_affine.Channels(), resized_data, false); - // resized.Write("mask_resize.jpg"); Forward(resized, outputs); } m_processor_->MarkDone(); diff --git a/cpp/inspireface/track_module/face_detect/rnet_adapt.cpp b/cpp/inspireface/track_module/face_detect/rnet_adapt.cpp index eb3e0806..cbc15efe 100644 --- a/cpp/inspireface/track_module/face_detect/rnet_adapt.cpp +++ b/cpp/inspireface/track_module/face_detect/rnet_adapt.cpp @@ -19,7 +19,6 @@ float RNetAdapt::operator()(const inspirecv::Image &bgr_affine) { m_processor_->MarkDone(); #ifdef 
INFERENCE_HELPER_ENABLE_RKNN2 auto sm = Softmax(outputs[0].second); - std::cout << sm[0] << ", " << sm[1] << std ::endl; return sm[1]; #else return outputs[0].second[1]; diff --git a/cpp/sample/api/sample_face_track.cpp b/cpp/sample/api/sample_face_track.cpp index 3cf03463..13f64307 100644 --- a/cpp/sample/api/sample_face_track.cpp +++ b/cpp/sample/api/sample_face_track.cpp @@ -61,7 +61,7 @@ int main(int argc, char* argv[]) { HOption option = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS | HF_ENABLE_DETECT_MODE_LANDMARK; // Non-video or frame sequence mode uses IMAGE-MODE, which is always face detection without // tracking - HFDetectMode detMode = HF_DETECT_MODE_LIGHT_TRACK; + HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT; // Maximum number of faces detected HInt32 maxDetectNum = 20; // Face detection image input level @@ -94,12 +94,10 @@ int main(int argc, char* argv[]) { // Execute HF_FaceContextRunFaceTrack captures face information in an image HFMultipleFaceData multipleFaceData = {0}; - for (int i = 0; i < 3; i++) { - ret = HFExecuteFaceTrack(session, imageHandle, &multipleFaceData); - if (ret != HSUCCEED) { - std::cout << "Execute HFExecuteFaceTrack error: " << ret << std::endl; - return ret; - } + ret = HFExecuteFaceTrack(session, imageHandle, &multipleFaceData); + if (ret != HSUCCEED) { + std::cout << "Execute HFExecuteFaceTrack error: " << ret << std::endl; + return ret; } // Print the number of faces detected diff --git a/doc/Error-Feedback-Codes.md b/doc/Error-Feedback-Codes.md index 985cc8bf..2c1feb16 100644 --- a/doc/Error-Feedback-Codes.md +++ b/doc/Error-Feedback-Codes.md @@ -23,34 +23,35 @@ During the use of InspireFace, some error feedback codes may be generated. 
Here | 18 | HERR_SESS_INVALID_RESOURCE | 1290 | Invalid static resource | | 19 | HERR_SESS_NUM_OF_MODELS_NOT_MATCH | 1291 | Number of models does not match | | 20 | HERR_SESS_LANDMARK_NUM_NOT_MATCH | 1300 | The number of input landmark points does not match | - | 21 | HERR_SESS_PIPELINE_FAILURE | 1288 | Pipeline module not initialized | - | 22 | HERR_SESS_REC_EXTRACT_FAILURE | 1295 | Face feature extraction not registered | - | 23 | HERR_SESS_REC_DEL_FAILURE | 1296 | Face feature deletion failed due to out of range index | - | 24 | HERR_SESS_REC_UPDATE_FAILURE | 1297 | Face feature update failed due to out of range index | - | 25 | HERR_SESS_REC_ADD_FEAT_EMPTY | 1298 | Feature vector for registration cannot be empty | - | 26 | HERR_SESS_REC_FEAT_SIZE_ERR | 1299 | Incorrect length of feature vector for registration | - | 27 | HERR_SESS_REC_INVALID_INDEX | 1300 | Invalid index number | - | 28 | HERR_SESS_REC_CONTRAST_FEAT_ERR | 1303 | Incorrect length of feature vector for comparison | - | 29 | HERR_SESS_REC_BLOCK_FULL | 1304 | Feature vector block full | - | 30 | HERR_SESS_REC_BLOCK_DEL_FAILURE | 1305 | Deletion failed | - | 31 | HERR_SESS_REC_BLOCK_UPDATE_FAILURE | 1306 | Update failed | - | 32 | HERR_SESS_REC_ID_ALREADY_EXIST | 1307 | ID already exists | - | 33 | HERR_SESS_FACE_DATA_ERROR | 1310 | Face data parsing | - | 34 | HERR_SESS_FACE_REC_OPTION_ERROR | 1320 | An optional parameter is incorrect | - | 35 | HERR_FT_HUB_DISABLE | 1329 | FeatureHub is disabled | - | 36 | HERR_FT_HUB_OPEN_ERROR | 1330 | Database open error | - | 37 | HERR_FT_HUB_NOT_OPENED | 1331 | Database not opened | - | 38 | HERR_FT_HUB_NO_RECORD_FOUND | 1332 | No record found | - | 39 | HERR_FT_HUB_CHECK_TABLE_ERROR | 1333 | Data table check error | - | 40 | HERR_FT_HUB_INSERT_FAILURE | 1334 | Data insertion error | - | 41 | HERR_FT_HUB_PREPARING_FAILURE | 1335 | Data preparation error | - | 42 | HERR_FT_HUB_EXECUTING_FAILURE | 1336 | SQL execution error | - | 43 | 
HERR_FT_HUB_NOT_VALID_FOLDER_PATH | 1337 | Invalid folder path | - | 44 | HERR_FT_HUB_ENABLE_REPETITION | 1338 | Enable db function repeatedly | - | 45 | HERR_FT_HUB_DISABLE_REPETITION | 1339 | Disable db function repeatedly | - | 46 | HERR_FT_HUB_NOT_FOUND_FEATURE | 1340 | Get face feature error | - | 47 | HERR_ARCHIVE_LOAD_FAILURE | 1360 | Archive load failure | - | 48 | HERR_ARCHIVE_LOAD_MODEL_FAILURE | 1361 | Model load failure | - | 49 | HERR_ARCHIVE_FILE_FORMAT_ERROR | 1362 | The archive format is incorrect | - | 50 | HERR_ARCHIVE_REPETITION_LOAD | 1363 | Do not reload the model | - | 51 | HERR_ARCHIVE_NOT_LOAD | 1364 | Model not loaded | + | 21 | HERR_SESS_LANDMARK_NOT_ENABLE | 1301 | The number of input landmark points does not match | + | 22 | HERR_SESS_PIPELINE_FAILURE | 1288 | Pipeline module not initialized | + | 23 | HERR_SESS_REC_EXTRACT_FAILURE | 1295 | Face feature extraction not registered | + | 24 | HERR_SESS_REC_DEL_FAILURE | 1296 | Face feature deletion failed due to out of range index | + | 25 | HERR_SESS_REC_UPDATE_FAILURE | 1297 | Face feature update failed due to out of range index | + | 26 | HERR_SESS_REC_ADD_FEAT_EMPTY | 1298 | Feature vector for registration cannot be empty | + | 27 | HERR_SESS_REC_FEAT_SIZE_ERR | 1299 | Incorrect length of feature vector for registration | + | 28 | HERR_SESS_REC_INVALID_INDEX | 1300 | Invalid index number | + | 29 | HERR_SESS_REC_CONTRAST_FEAT_ERR | 1303 | Incorrect length of feature vector for comparison | + | 30 | HERR_SESS_REC_BLOCK_FULL | 1304 | Feature vector block full | + | 31 | HERR_SESS_REC_BLOCK_DEL_FAILURE | 1305 | Deletion failed | + | 32 | HERR_SESS_REC_BLOCK_UPDATE_FAILURE | 1306 | Update failed | + | 33 | HERR_SESS_REC_ID_ALREADY_EXIST | 1307 | ID already exists | + | 34 | HERR_SESS_FACE_DATA_ERROR | 1310 | Face data parsing | + | 35 | HERR_SESS_FACE_REC_OPTION_ERROR | 1320 | An optional parameter is incorrect | + | 36 | HERR_FT_HUB_DISABLE | 1329 | FeatureHub is disabled | + | 37 | 
HERR_FT_HUB_OPEN_ERROR | 1330 | Database open error | + | 38 | HERR_FT_HUB_NOT_OPENED | 1331 | Database not opened | + | 39 | HERR_FT_HUB_NO_RECORD_FOUND | 1332 | No record found | + | 40 | HERR_FT_HUB_CHECK_TABLE_ERROR | 1333 | Data table check error | + | 41 | HERR_FT_HUB_INSERT_FAILURE | 1334 | Data insertion error | + | 42 | HERR_FT_HUB_PREPARING_FAILURE | 1335 | Data preparation error | + | 43 | HERR_FT_HUB_EXECUTING_FAILURE | 1336 | SQL execution error | + | 44 | HERR_FT_HUB_NOT_VALID_FOLDER_PATH | 1337 | Invalid folder path | + | 45 | HERR_FT_HUB_ENABLE_REPETITION | 1338 | Enable db function repeatedly | + | 46 | HERR_FT_HUB_DISABLE_REPETITION | 1339 | Disable db function repeatedly | + | 47 | HERR_FT_HUB_NOT_FOUND_FEATURE | 1340 | Get face feature error | + | 48 | HERR_ARCHIVE_LOAD_FAILURE | 1360 | Archive load failure | + | 49 | HERR_ARCHIVE_LOAD_MODEL_FAILURE | 1361 | Model load failure | + | 50 | HERR_ARCHIVE_FILE_FORMAT_ERROR | 1362 | The archive format is incorrect | + | 51 | HERR_ARCHIVE_REPETITION_LOAD | 1363 | Do not reload the model | + | 52 | HERR_ARCHIVE_NOT_LOAD | 1364 | Model not loaded | diff --git a/tools/error_table.md b/tools/error_table.md index 0bb29fd1..fc3d986d 100644 --- a/tools/error_table.md +++ b/tools/error_table.md @@ -13,39 +13,42 @@ | 11 | HERR_INVALID_IMAGE_STREAM_PARAM | 35 | Invalid image param | | 12 | HERR_INVALID_SERIALIZATION_FAILED | 36 | Invalid face serialization failed | | 13 | HERR_INVALID_DETECTION_INPUT | 37 | Failed to modify detector input size | - | 14 | HERR_SESS_BASE | 1280 | Session error types | - | 15 | HERR_SESS_FUNCTION_UNUSABLE | 1282 | Function not usable | - | 16 | HERR_SESS_TRACKER_FAILURE | 1283 | Tracker module not initialized | - | 17 | HERR_SESS_INVALID_RESOURCE | 1290 | Invalid static resource | - | 18 | HERR_SESS_NUM_OF_MODELS_NOT_MATCH | 1291 | Number of models does not match | - | 19 | HERR_SESS_LANDMARK_NUM_NOT_MATCH | 1300 | The number of input landmark points does not match | - | 20 | 
HERR_SESS_PIPELINE_FAILURE | 1288 | Pipeline module not initialized | - | 21 | HERR_SESS_REC_EXTRACT_FAILURE | 1295 | Face feature extraction not registered | - | 22 | HERR_SESS_REC_DEL_FAILURE | 1296 | Face feature deletion failed due to out of range index | - | 23 | HERR_SESS_REC_UPDATE_FAILURE | 1297 | Face feature update failed due to out of range index | - | 24 | HERR_SESS_REC_ADD_FEAT_EMPTY | 1298 | Feature vector for registration cannot be empty | - | 25 | HERR_SESS_REC_FEAT_SIZE_ERR | 1299 | Incorrect length of feature vector for registration | - | 26 | HERR_SESS_REC_INVALID_INDEX | 1300 | Invalid index number | - | 27 | HERR_SESS_REC_CONTRAST_FEAT_ERR | 1303 | Incorrect length of feature vector for comparison | - | 28 | HERR_SESS_REC_BLOCK_FULL | 1304 | Feature vector block full | - | 29 | HERR_SESS_REC_BLOCK_DEL_FAILURE | 1305 | Deletion failed | - | 30 | HERR_SESS_REC_BLOCK_UPDATE_FAILURE | 1306 | Update failed | - | 31 | HERR_SESS_REC_ID_ALREADY_EXIST | 1307 | ID already exists | - | 32 | HERR_SESS_FACE_DATA_ERROR | 1310 | Face data parsing | - | 33 | HERR_SESS_FACE_REC_OPTION_ERROR | 1320 | An optional parameter is incorrect | - | 34 | HERR_FT_HUB_DISABLE | 1329 | FeatureHub is disabled | - | 35 | HERR_FT_HUB_OPEN_ERROR | 1330 | Database open error | - | 36 | HERR_FT_HUB_NOT_OPENED | 1331 | Database not opened | - | 37 | HERR_FT_HUB_NO_RECORD_FOUND | 1332 | No record found | - | 38 | HERR_FT_HUB_CHECK_TABLE_ERROR | 1333 | Data table check error | - | 39 | HERR_FT_HUB_INSERT_FAILURE | 1334 | Data insertion error | - | 40 | HERR_FT_HUB_PREPARING_FAILURE | 1335 | Data preparation error | - | 41 | HERR_FT_HUB_EXECUTING_FAILURE | 1336 | SQL execution error | - | 42 | HERR_FT_HUB_NOT_VALID_FOLDER_PATH | 1337 | Invalid folder path | - | 43 | HERR_FT_HUB_ENABLE_REPETITION | 1338 | Enable db function repeatedly | - | 44 | HERR_FT_HUB_DISABLE_REPETITION | 1339 | Disable db function repeatedly | - | 45 | HERR_ARCHIVE_LOAD_FAILURE | 1360 | Archive load failure | - 
| 46 | HERR_ARCHIVE_LOAD_MODEL_FAILURE | 1361 | Model load failure | - | 47 | HERR_ARCHIVE_FILE_FORMAT_ERROR | 1362 | The archive format is incorrect | - | 48 | HERR_ARCHIVE_REPETITION_LOAD | 1363 | Do not reload the model | - | 49 | HERR_ARCHIVE_NOT_LOAD | 1364 | Model not loaded | + | 14 | HERR_INVALID_IMAGE_BITMAP_HANDLE | 38 | Invalid image bitmap handle | + | 15 | HERR_SESS_BASE | 1280 | Session error types | + | 16 | HERR_SESS_FUNCTION_UNUSABLE | 1282 | Function not usable | + | 17 | HERR_SESS_TRACKER_FAILURE | 1283 | Tracker module not initialized | + | 18 | HERR_SESS_INVALID_RESOURCE | 1290 | Invalid static resource | + | 19 | HERR_SESS_NUM_OF_MODELS_NOT_MATCH | 1291 | Number of models does not match | + | 20 | HERR_SESS_LANDMARK_NUM_NOT_MATCH | 1300 | The number of input landmark points does not match | + | 21 | HERR_SESS_LANDMARK_NOT_ENABLE | 1301 | The number of input landmark points does not match | + | 22 | HERR_SESS_PIPELINE_FAILURE | 1288 | Pipeline module not initialized | + | 23 | HERR_SESS_REC_EXTRACT_FAILURE | 1295 | Face feature extraction not registered | + | 24 | HERR_SESS_REC_DEL_FAILURE | 1296 | Face feature deletion failed due to out of range index | + | 25 | HERR_SESS_REC_UPDATE_FAILURE | 1297 | Face feature update failed due to out of range index | + | 26 | HERR_SESS_REC_ADD_FEAT_EMPTY | 1298 | Feature vector for registration cannot be empty | + | 27 | HERR_SESS_REC_FEAT_SIZE_ERR | 1299 | Incorrect length of feature vector for registration | + | 28 | HERR_SESS_REC_INVALID_INDEX | 1300 | Invalid index number | + | 29 | HERR_SESS_REC_CONTRAST_FEAT_ERR | 1303 | Incorrect length of feature vector for comparison | + | 30 | HERR_SESS_REC_BLOCK_FULL | 1304 | Feature vector block full | + | 31 | HERR_SESS_REC_BLOCK_DEL_FAILURE | 1305 | Deletion failed | + | 32 | HERR_SESS_REC_BLOCK_UPDATE_FAILURE | 1306 | Update failed | + | 33 | HERR_SESS_REC_ID_ALREADY_EXIST | 1307 | ID already exists | + | 34 | HERR_SESS_FACE_DATA_ERROR | 1310 | Face data 
parsing | + | 35 | HERR_SESS_FACE_REC_OPTION_ERROR | 1320 | An optional parameter is incorrect | + | 36 | HERR_FT_HUB_DISABLE | 1329 | FeatureHub is disabled | + | 37 | HERR_FT_HUB_OPEN_ERROR | 1330 | Database open error | + | 38 | HERR_FT_HUB_NOT_OPENED | 1331 | Database not opened | + | 39 | HERR_FT_HUB_NO_RECORD_FOUND | 1332 | No record found | + | 40 | HERR_FT_HUB_CHECK_TABLE_ERROR | 1333 | Data table check error | + | 41 | HERR_FT_HUB_INSERT_FAILURE | 1334 | Data insertion error | + | 42 | HERR_FT_HUB_PREPARING_FAILURE | 1335 | Data preparation error | + | 43 | HERR_FT_HUB_EXECUTING_FAILURE | 1336 | SQL execution error | + | 44 | HERR_FT_HUB_NOT_VALID_FOLDER_PATH | 1337 | Invalid folder path | + | 45 | HERR_FT_HUB_ENABLE_REPETITION | 1338 | Enable db function repeatedly | + | 46 | HERR_FT_HUB_DISABLE_REPETITION | 1339 | Disable db function repeatedly | + | 47 | HERR_FT_HUB_NOT_FOUND_FEATURE | 1340 | Get face feature error | + | 48 | HERR_ARCHIVE_LOAD_FAILURE | 1360 | Archive load failure | + | 49 | HERR_ARCHIVE_LOAD_MODEL_FAILURE | 1361 | Model load failure | + | 50 | HERR_ARCHIVE_FILE_FORMAT_ERROR | 1362 | The archive format is incorrect | + | 51 | HERR_ARCHIVE_REPETITION_LOAD | 1363 | Do not reload the model | + | 52 | HERR_ARCHIVE_NOT_LOAD | 1364 | Model not loaded | From 97cee551a5f9e868812e49d2c4c3ad484d972865 Mon Sep 17 00:00:00 2001 From: Jingyu Date: Wed, 8 Jan 2025 23:00:01 +0800 Subject: [PATCH 8/9] WIP: Complete rk356x --- .github/workflows/build_sdks.yaml | 46 ++++++++++++-- .../workflows/linux-armv7-rk356x-aarch64.yaml | 2 +- .github/workflows/release-sdks.yaml | 62 ++++++++++++++++++- 3 files changed, 104 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build_sdks.yaml b/.github/workflows/build_sdks.yaml index 338ba7ee..13747e51 100644 --- a/.github/workflows/build_sdks.yaml +++ b/.github/workflows/build_sdks.yaml @@ -64,7 +64,7 @@ jobs: ls build build_macos_arm64: - name: Compile MacOS-arm64 + name: Compile MacOS-Arm64 runs-on: 
macos-14 steps: @@ -89,7 +89,7 @@ jobs: ls build build_armv7_armhf: - name: Compile Armv7-armhf + name: Compile Armv7-Armhf runs-on: ubuntu-latest steps: @@ -256,7 +256,7 @@ jobs: ls build build_ios: - name: Compile iOS-arm64 + name: Compile iOS-Arm64 runs-on: macos-latest steps: @@ -302,7 +302,7 @@ jobs: ls build build_armv7_rv1106_armhf_uclibc: - name: Compile Linux-armv7 rv1106 + name: Compile Linux-Armv7 RV1106 runs-on: ubuntu-latest steps: @@ -336,4 +336,42 @@ jobs: - name: Start Building the InspireFace-Linux-armv7-rv1106 run: | bash command/build_cross_rv1106_armhf_uclibc.sh + ls build + + build_rk356x_aarch64: + name: Compile Linux-AArch64 RK356X + runs-on: ubuntu-latest + + steps: + # Step 1: Checkout the repository + - name: Checkout Repository + uses: actions/checkout@v4 + + # Step 2: Synchronize and update submodules recursively + - name: Update submodules + run: | + git clone --recurse-submodules https://github.com/tunmx/inspireface-3rdparty.git 3rdparty + + # Step 3: Install wget and xz-utils + - name: Install wget and xz-utils + run: | + sudo apt-get update + sudo apt-get install -y wget xz-utils + + # Step 4: Download Toolchain + - name: Download and Extract Linaro Toolchain + run: | + mkdir -p /opt/linaro-toolchain + wget -qO- https://releases.linaro.org/components/toolchain/binaries/6.3-2017.05/aarch64-linux-gnu/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu.tar.xz | tar -xJ -C /opt/linaro-toolchain --strip-components=1 + + # Step 5: Install Toolchain + - name: Set environment variables + run: | + echo "ARM_CROSS_COMPILE_TOOLCHAIN=/opt/linaro-toolchain" >> $GITHUB_ENV + echo "PATH=/opt/linaro-toolchain/bin:${PATH}" >> $GITHUB_ENV + + # Step 6: Start building the SDK + - name: Start Building the InspireFace-Linux-AArch64-rk356x + run: | + bash command/build_cross_rk356x_aarch64.sh ls build \ No newline at end of file diff --git a/.github/workflows/linux-armv7-rk356x-aarch64.yaml b/.github/workflows/linux-armv7-rk356x-aarch64.yaml index 
788a1de1..edeb30d3 100644 --- a/.github/workflows/linux-armv7-rk356x-aarch64.yaml +++ b/.github/workflows/linux-armv7-rk356x-aarch64.yaml @@ -2,7 +2,7 @@ name: Build Linux-Ubuntu-AArch64-rk356x on: push: - branches: ["dev/rk356x"] + branches: ["feature/rk356x"] # Set the global GitHub token environment variable env: diff --git a/.github/workflows/release-sdks.yaml b/.github/workflows/release-sdks.yaml index 9471c9b3..a8733a01 100644 --- a/.github/workflows/release-sdks.yaml +++ b/.github/workflows/release-sdks.yaml @@ -548,10 +548,69 @@ jobs: path: | inspireface-macos-intel-x86-64-${{ env.VERSION }}.zip + build_rk356x_aarch64: + name: Compile Linux-AArch64 RK356X + runs-on: ubuntu-latest + + steps: + # Step 1: Checkout the repository + - name: Checkout Repository + uses: actions/checkout@v4 + + # Step 2: Extract the version number from the tag (e.g., "v1.2.3" becomes "1.2.3") + - name: Extract Version Number + id: extract_version + run: echo "VERSION=$(echo ${GITHUB_REF#refs/tags/} | sed 's/^v//')" >> $GITHUB_ENV + + # Step 3: Synchronize and update submodules recursively + - name: Update submodules + run: | + git clone --recurse-submodules https://github.com/tunmx/inspireface-3rdparty.git 3rdparty + + # Step 4: Install wget and xz-utils + - name: Install wget and xz-utils + run: | + sudo apt-get update + sudo apt-get install -y wget xz-utils + + # Step 5: Download Toolchain + - name: Download and Extract Linaro Toolchain + run: | + mkdir -p /opt/linaro-toolchain + wget -qO- https://releases.linaro.org/components/toolchain/binaries/6.3-2017.05/aarch64-linux-gnu/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu.tar.xz | tar -xJ -C /opt/linaro-toolchain --strip-components=1 + + # Step 6: Install Toolchain + - name: Set environment variables + run: | + echo "ARM_CROSS_COMPILE_TOOLCHAIN=/opt/linaro-toolchain" >> $GITHUB_ENV + echo "PATH=/opt/linaro-toolchain/bin:${PATH}" >> $GITHUB_ENV + + # Step 7: Start building the SDK + - name: Start Building the 
InspireFace-Linux-AArch64-rk356x + run: | + bash command/build_cross_rk356x_aarch64.sh + ls build + + # Step 8: Zip SDK directory + - name: Zip SDK directory + run: | + cd build + zip -r ../inspireface-linux-aarch64-rk356x-${{ env.VERSION }}.zip inspireface-linux-aarch64-rk356x-${{ env.VERSION }} + cd .. + stat inspireface-linux-aarch64-rk356x-${{ env.VERSION }}.zip + + # Step 9: Upload the zipped SDK files for the next job + - name: Upload SDK Artifacts + uses: actions/upload-artifact@v4 + with: + name: sdk_files_linux_aarch64_rk356x + path: | + inspireface-linux-aarch64-rk356x-${{ env.VERSION }}.zip + release: name: Release SDKs to GitHub - needs: [ build_linux_x86, build_armv7_armhf, build_armv7_rv1109rv1126_armhf, build_linux_aarch64, build_android, build_ios, build_manylinux2014_x86, build_armv7_rv1106_armhf_uclibc, build_macos_apple_silicon_arm64, build_macos_intel_x86_64] + needs: [ build_linux_x86, build_armv7_armhf, build_armv7_rv1109rv1126_armhf, build_linux_aarch64, build_android, build_ios, build_manylinux2014_x86, build_armv7_rv1106_armhf_uclibc, build_rk356x_aarch64, build_macos_apple_silicon_arm64, build_macos_intel_x86_64] runs-on: ubuntu-latest steps: @@ -584,6 +643,7 @@ jobs: sdk_artifacts/sdk_files_armv7_armhf/inspireface-linux-armv7-armhf-${{ env.VERSION }}.zip sdk_artifacts/sdk_files_armv7_rv1109rv1126_armhf/inspireface-linux-armv7-rv1109rv1126-armhf-${{ env.VERSION }}.zip sdk_artifacts/sdk_files_armv7_rv1106_armhf_uclibc/inspireface-linux-armv7-rv1106-armhf-uclibc-${{ env.VERSION }}.zip + sdk_artifacts/sdk_files_linux_aarch64_rk356x/inspireface-linux-aarch64-rk356x-${{ env.VERSION }}.zip sdk_artifacts/sdk_files_linux_aarch64/inspireface-linux-aarch64-${{ env.VERSION }}.zip sdk_artifacts/sdk_files_android/inspireface-android-${{ env.VERSION }}.zip sdk_artifacts/sdk_files_ios/inspireface-ios-${{ env.VERSION }}.zip From 9398b96a1c617ed816d4c2c5a0b8135b34ee7e47 Mon Sep 17 00:00:00 2001 From: Jingyu Date: Wed, 8 Jan 2025 23:20:43 +0800 Subject: 
[PATCH 9/9] Update: support rk356x --- README.md | 17 ++++++++++++----- docker-compose.yml | 36 +++++++++++++++++++++++------------- 2 files changed, 35 insertions(+), 18 deletions(-) diff --git a/README.md b/README.md index 1b90c894..733507ff 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,11 @@ Please contact [contact@insightface.ai](mailto:contact@insightface.ai?subject=InspireFace) ## Change Logs -**`2024-12-22`** Started adapting for multiple Rockchip devices with NPU support, beginning with RV1106 support. +**`2025-01-08`** Support inference on Rockchip devices **RK3566/RK3568** NPU. + +**`2024-12-25`** Add support for optional **RKRGA** image acceleration processing on Rockchip devices. + +**`2024-12-22`** Started adapting for multiple Rockchip devices with NPU support, beginning with **RV1103/RV1106** support. **`2024-12-10`** Added support for quick installation via Python package manager. @@ -132,6 +136,8 @@ bash command/download_models_general.sh Megatron bash command/download_models_general.sh Gundam_RV1109 # Download resource files for RV1106 bash command/download_models_general.sh Gundam_RV1106 +# Download resource files for RK356X +bash command/download_models_general.sh Gundam_RK356X # Download all model files bash command/download_models_general.sh @@ -227,9 +233,9 @@ We have completed the adaptation and testing of the software across various oper | 1 | **Linux-CPU** | ARMv7 | - | [![build](https://img.shields.io/github/actions/workflow/status/HyperInspire/InspireFace/release-sdks.yaml?&style=for-the-badge&label=build)](https://github.com/HyperInspire/InspireFace/actions/workflows/release-sdks.yaml) | ![test](https://img.shields.io/badge/OFFLINE-PASSING-blue?style=for-the-badge) | | 2 | | ARMv8 | - | [![build](https://img.shields.io/github/actions/workflow/status/HyperInspire/InspireFace/release-sdks.yaml?&style=for-the-badge&label=build)](https://github.com/HyperInspire/InspireFace/actions/workflows/release-sdks.yaml) | 
![test](https://img.shields.io/badge/OFFLINE-PASSING-blue?style=for-the-badge) | | 3 | | x86/x86_64 | - | [![build](https://img.shields.io/github/actions/workflow/status/HyperInspire/InspireFace/release-sdks.yaml?&style=for-the-badge&label=build)](https://github.com/HyperInspire/InspireFace/actions/workflows/release-sdks.yaml) | [![test](https://img.shields.io/github/actions/workflow/status/HyperInspire/InspireFace/test_ubuntu_x86_Pikachu.yaml?style=for-the-badge&label=Test&color=blue)](https://github.com/HyperInspire/InspireFace/actions/workflows/test_ubuntu_x86_Pikachu.yaml) | -| 4 | **Linux-Rockchip** | ARMv7 | RV1109RV1126 | [![build](https://img.shields.io/github/actions/workflow/status/HyperInspire/InspireFace/release-sdks.yaml?&style=for-the-badge&label=build)](https://github.com/HyperInspire/InspireFace/actions/workflows/release-sdks.yaml) | ![test](https://img.shields.io/badge/OFFLINE-PASSING-blue?style=for-the-badge) | -| 5 | | ARMv7 | RV1106 | [![build](https://img.shields.io/github/actions/workflow/status/HyperInspire/InspireFace/release-sdks.yaml?&style=for-the-badge&label=build)](https://github.com/HyperInspire/InspireFace/actions/workflows/release-sdks.yaml) |![test](https://img.shields.io/badge/OFFLINE-PASSING-blue?style=for-the-badge)| -| 6 | | ARMv8 | RK3566/RK3568 | ![build](https://img.shields.io/badge/build-developing-yellow?style=for-the-badge) | | +| 4 | **Linux-Rockchip** | ARMv7 | RV1109/RV1126 | [![build](https://img.shields.io/github/actions/workflow/status/HyperInspire/InspireFace/release-sdks.yaml?&style=for-the-badge&label=build)](https://github.com/HyperInspire/InspireFace/actions/workflows/release-sdks.yaml) | ![test](https://img.shields.io/badge/OFFLINE-PASSING-blue?style=for-the-badge) | +| 5 | | ARMv7 | RV1103/RV1106 | 
[![build](https://img.shields.io/github/actions/workflow/status/HyperInspire/InspireFace/release-sdks.yaml?&style=for-the-badge&label=build)](https://github.com/HyperInspire/InspireFace/actions/workflows/release-sdks.yaml) |![test](https://img.shields.io/badge/OFFLINE-PASSING-blue?style=for-the-badge)| +| 6 | | ARMv8 | RK3566/RK3568 | [![build](https://img.shields.io/github/actions/workflow/status/HyperInspire/InspireFace/release-sdks.yaml?&style=for-the-badge&label=build)](https://github.com/HyperInspire/InspireFace/actions/workflows/release-sdks.yaml) | ![test](https://img.shields.io/badge/OFFLINE-PASSING-blue?style=for-the-badge) | | 7 | | ARMv8 | RK3588 | ![build](https://img.shields.io/badge/build-developing-yellow?style=for-the-badge) | | | 8 | **Linux-CUDA** | x86/x86_64 | NVIDIA-GPU | ![build](https://img.shields.io/badge/OFFLINE-PASSING-green?style=for-the-badge) | ![test](https://img.shields.io/badge/OFFLINE-PASSING-blue?style=for-the-badge) | | 9 | **MacOS** | Intel | CPU/Metal/**ANE** | [![build](https://img.shields.io/github/actions/workflow/status/HyperInspire/InspireFace/release-sdks.yaml?&style=for-the-badge&label=build)](https://github.com/HyperInspire/InspireFace/actions/workflows/release-sdks.yaml) | ![test](https://img.shields.io/badge/OFFLINE-PASSING-blue?style=for-the-badge) | @@ -530,7 +536,8 @@ For different scenarios, we currently provide several Packs, each containing mul | Pikachu | CPU | Lightweight edge-side models | [Download](https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Pikachu) | | Megatron | CPU, GPU | Mobile and server models | [Download](https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Megatron) | | Gundam-RV1109 | RKNPU | Supports RK1109 and RK1126 | [Download](https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Gundam_RV1109) | -| Gundam-RV1106 | RKNPU | Supports RK1106(RV1103 may be supported, but not verified) | 
[Download](https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Gundam_RV1106) | +| Gundam-RV1106 | RKNPU | Supports RV1103 and RV1106 | [Download](https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Gundam_RV1106) | +| Gundam-RK356X | RKNPU | Supports RK3566 and RK3568 | [Download](https://github.com/HyperInspire/InspireFace/releases/download/v1.x/Gundam_RK356X) | ## Acknowledgement diff --git a/docker-compose.yml b/docker-compose.yml index fb445ef3..f734fe80 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -3,32 +3,32 @@ services: build-ubuntu18: build: context: . - dockerfile: docker/Dockerfile.ubuntu18 # Use the Ubuntu18.04 + dockerfile: docker/Dockerfile.ubuntu18 # Use the Ubuntu18.04 environment: - VERSION=${VERSION} working_dir: /workspace volumes: - - .:/workspace # Mount the project root directory to the container + - .:/workspace # Mount the project root directory to the container command: bash command/build_linux_ubuntu18.sh build-cross-rv1109rv1126-armhf: build: context: . - dockerfile: docker/Dockerfile.arm-linux-gnueabihf # Use the arm-linux-gnueabihf tool chain + dockerfile: docker/Dockerfile.arm-linux-gnueabihf # Use the arm-linux-gnueabihf tool chain environment: - VERSION=${VERSION} working_dir: /workspace volumes: - - .:/workspace # Mount the project root directory to the container + - .:/workspace # Mount the project root directory to the container command: bash command/build_cross_rv1109rv1126_armhf.sh build-cross-armv7-armhf: build: context: . 
- dockerfile: docker/Dockerfile.arm-linux-gnueabihf # Use the arm-linux-gnueabihf tool chain + dockerfile: docker/Dockerfile.arm-linux-gnueabihf # Use the arm-linux-gnueabihf tool chain environment: - VERSION=${VERSION} working_dir: /workspace volumes: - - .:/workspace # Mount the project root directory to the container + - .:/workspace # Mount the project root directory to the container command: bash command/build_cross_armv7_armhf.sh build-cross-rv1106-armhf-uclibc: build: @@ -38,27 +38,37 @@ services: - VERSION=${VERSION} working_dir: /workspace volumes: - - .:/workspace # Mount the project root directory to the container + - .:/workspace # Mount the project root directory to the container command: bash command/build_cross_rv1106_armhf_uclibc.sh build-cross-aarch64: build: context: . - dockerfile: docker/Dockerfile.arm-linux-aarch64 # Use the arm-linux-gnueabihf tool chain + dockerfile: docker/Dockerfile.arm-linux-aarch64 # Use the arm-linux-gnueabihf tool chain environment: - VERSION=${VERSION} working_dir: /workspace volumes: - - .:/workspace # Mount the project root directory to the container + - .:/workspace # Mount the project root directory to the container command: bash command/build_cross_aarch64.sh + build-cross-rk356x-aarch64: + build: + context: . + dockerfile: docker/Dockerfile.arm-linux-aarch64 # Use the arm-linux-gnueabihf tool chain + environment: + - VERSION=${VERSION} + working_dir: /workspace + volumes: + - .:/workspace # Mount the project root directory to the container + command: bash command/build_cross_rk356x_aarch64.sh build-cross-android: build: context: . 
- dockerfile: docker/Dockerfile.android # Use the arm-linux-gnueabihf tool chain + dockerfile: docker/Dockerfile.android # Use the arm-linux-gnueabihf tool chain environment: - VERSION=${VERSION} working_dir: /workspace volumes: - - .:/workspace # Mount the project root directory to the container + - .:/workspace # Mount the project root directory to the container command: bash command/build_android.sh build-cuda-ubuntu20: build: @@ -68,7 +78,7 @@ services: - VERSION=${VERSION} working_dir: /workspace volumes: - - .:/workspace # Mount the project root directory to the container + - .:/workspace # Mount the project root directory to the container command: bash command/build_linux_cuda.sh build-manylinux2014-x86: build: @@ -78,5 +88,5 @@ services: - VERSION=${VERSION} working_dir: /workspace volumes: - - .:/workspace # Mount the project root directory to the container + - .:/workspace # Mount the project root directory to the container command: bash command/build_wheel_manylinux2014_x86.sh