From b75e705d982786481d35f50d80f4e3e9ec1babc5 Mon Sep 17 00:00:00 2001
From: tunm
Date: Wed, 22 Jan 2025 15:09:07 +0800
Subject: [PATCH] Add track cost time

---
 cpp/inspireface/c_api/inspireface.cc          |  24 ++++
 cpp/inspireface/c_api/inspireface.h           |  14 ++
 cpp/inspireface/face_session.cpp              |  22 ++-
 cpp/inspireface/face_session.h                |  18 +++
 .../inference_helper/inference_helper_mnn.cpp | 135 +++++++++---------
 cpp/sample/CMakeLists.txt                     |   5 +
 .../api/sample_face_track_benchmark.cpp       | 130 +++++++++++++++++
 7 files changed, 277 insertions(+), 71 deletions(-)
 create mode 100644 cpp/sample/api/sample_face_track_benchmark.cpp

diff --git a/cpp/inspireface/c_api/inspireface.cc b/cpp/inspireface/c_api/inspireface.cc
index 21dc766f..76931761 100644
--- a/cpp/inspireface/c_api/inspireface.cc
+++ b/cpp/inspireface/c_api/inspireface.cc
@@ -649,6 +649,30 @@ HResult HFGetFaceFiveKeyPointsFromFaceToken(HFFaceBasicToken singleFace, HPoint2
     return HSUCCEED;
 }
 
+HResult HFSessionSetEnableTrackCostSpend(HFSession session, bool value) {
+    if (session == nullptr) {
+        return HERR_INVALID_CONTEXT_HANDLE;
+    }
+    HF_FaceAlgorithmSession *ctx = (HF_FaceAlgorithmSession *)session;
+    if (ctx == nullptr) {
+        return HERR_INVALID_CONTEXT_HANDLE;
+    }
+    ctx->impl.SetEnableTrackCostSpend(value);
+    return HSUCCEED;
+}
+
+HResult HFSessionPrintTrackCostSpend(HFSession session) {
+    if (session == nullptr) {
+        return HERR_INVALID_CONTEXT_HANDLE;
+    }
+    HF_FaceAlgorithmSession *ctx = (HF_FaceAlgorithmSession *)session;
+    if (ctx == nullptr) {
+        return HERR_INVALID_CONTEXT_HANDLE;
+    }
+    ctx->impl.PrintTrackCostSpend();
+    return HSUCCEED;
+}
+
 HResult HFFeatureHubFaceSearchThresholdSetting(float threshold) {
     FEATURE_HUB_DB->SetRecognitionThreshold(threshold);
     return HSUCCEED;
diff --git a/cpp/inspireface/c_api/inspireface.h b/cpp/inspireface/c_api/inspireface.h
index 3135488d..756bf0a5 100644
--- a/cpp/inspireface/c_api/inspireface.h
+++ b/cpp/inspireface/c_api/inspireface.h
@@ -554,6 +554,20 @@ HYPER_CAPI_EXPORT extern HResult HFGetFaceDenseLandmarkFromFaceToken(HFFaceBasic
  */
HYPER_CAPI_EXPORT extern HResult HFGetFaceFiveKeyPointsFromFaceToken(HFFaceBasicToken singleFace, HPoint2f *landmarks, HInt32 num);
 
+/**
+ * @brief Enable or disable track cost-time measurement for the session
+ * @param value Whether cost-time measurement is enabled
+ * @return int32_t Status code of the operation.
+ * */
+HYPER_CAPI_EXPORT extern HResult HFSessionSetEnableTrackCostSpend(HFSession session, bool value);
+
+/**
+ * @brief Print the track cost-time report
+ * @param session The session handle
+ * @return int32_t Status code of the operation.
+ * */
+HYPER_CAPI_EXPORT extern HResult HFSessionPrintTrackCostSpend(HFSession session);
+
 /************************************************************************
  * Face Recognition
  ************************************************************************/
diff --git a/cpp/inspireface/face_session.cpp b/cpp/inspireface/face_session.cpp
index ccda723c..31693d25 100644
--- a/cpp/inspireface/face_session.cpp
+++ b/cpp/inspireface/face_session.cpp
@@ -43,12 +43,16 @@ int32_t FaceSession::Configuration(DetectModuleMode detect_mode, int32_t max_det
 
     m_face_pipeline_ = std::make_shared<FacePipelineModule>(INSPIRE_LAUNCH->getMArchive(), param.enable_liveness, param.enable_mask_detect,
                                                             param.enable_face_attribute, param.enable_interaction_liveness);
+    m_face_track_cost_ = std::make_shared("FaceTrack");
 
     return HSUCCEED;
 }
 
 int32_t FaceSession::FaceDetectAndTrack(inspirecv::InspireImageProcess& process) {
     std::lock_guard<std::mutex> lock(m_mtx_);
+    if (m_enable_track_cost_spend_) {
+        m_face_track_cost_->Start();
+    }
     m_detect_cache_.clear();
     m_face_basic_data_cache_.clear();
     m_face_rects_cache_.clear();
@@ -107,7 +111,9 @@ int32_t FaceSession::FaceDetectAndTrack(inspirecv::InspireImageProcess& process)
             basic.dataSize = m_detect_cache_[i].size();
             basic.data = m_detect_cache_[i].data();
         }
-
+    if (m_enable_track_cost_spend_) {
+        m_face_track_cost_->Stop();
+    }
     // LOGD("Track COST: %f", m_face_track_->GetTrackTotalUseTime());
     return HSUCCEED;
 }
@@ -410,4 +416,16 @@ int32_t FaceSession::SetTrackModeDetectInterval(int value) {
     return HSUCCEED;
 }
 
-}  // namespace inspire
\ No newline at end of file
+int32_t FaceSession::SetEnableTrackCostSpend(bool value) {
+    m_enable_track_cost_spend_ = value;
+    m_face_track_cost_->Reset();
+    return HSUCCEED;
+}
+
+void FaceSession::PrintTrackCostSpend() {
+    if (m_enable_track_cost_spend_) {
+        INSPIRE_LOGI("%s", m_face_track_cost_->Report().c_str());
+    }
+}
+
+}  // namespace inspire
diff --git a/cpp/inspireface/face_session.h b/cpp/inspireface/face_session.h
index 5b3d8c84..fd0c3241 100644
--- a/cpp/inspireface/face_session.h
+++ b/cpp/inspireface/face_session.h
@@ -8,6 +8,7 @@
 #define INSPIRE_FACE_CONTEXT_H
 
 #include <memory>
+#include
 #include "data_type.h"
 #include "track_module/face_track_module.h"
 #include "pipeline_module/face_pipeline_module.h"
@@ -334,6 +335,18 @@ class INSPIRE_API FaceSession {
      * */
     int32_t SetTrackModeDetectInterval(int value);
 
+    /**
+     * @brief Enable or disable track cost-time measurement
+     * @param value Whether cost-time measurement is enabled
+     * @return int32_t Status code of the operation.
+     * */
+    int32_t SetEnableTrackCostSpend(bool value);
+
+    /**
+     * @brief Print the track cost-time report
+     * */
+    void PrintTrackCostSpend();
+
 private:
     // Private member variables
     CustomPipelineParameter m_parameter_;  ///< Stores custom parameters for the pipeline
@@ -375,6 +388,11 @@ class INSPIRE_API FaceSession {
     float m_face_feature_norm_;  ///< Cache for face feature norm
 
     std::mutex m_mtx_;  ///< Mutex for thread safety.
+
+    // Track cost-time statistics
+    std::shared_ptr m_face_track_cost_;
+
+    bool m_enable_track_cost_spend_ = false;
 };
 
 } // namespace inspire
diff --git a/cpp/inspireface/middleware/inference_helper/inference_helper_mnn.cpp b/cpp/inspireface/middleware/inference_helper/inference_helper_mnn.cpp
index a624b7ff..881da897 100644
--- a/cpp/inspireface/middleware/inference_helper/inference_helper_mnn.cpp
+++ b/cpp/inspireface/middleware/inference_helper/inference_helper_mnn.cpp
@@ -35,40 +35,36 @@ limitations under the License.
 #include "log.h"
 
 /*** Macro ***/
 #define TAG "InferenceHelperMnn"
-#define PRINT(...)   INFERENCE_HELPER_LOG_PRINT(TAG, __VA_ARGS__)
+#define PRINT(...) INFERENCE_HELPER_LOG_PRINT(TAG, __VA_ARGS__)
 #define PRINT_E(...) INFERENCE_HELPER_LOG_PRINT_E(TAG, __VA_ARGS__)
 
 using namespace inspire;
 
 /*** Function ***/
-InferenceHelperMnn::InferenceHelperMnn()
-{
+InferenceHelperMnn::InferenceHelperMnn() {
     num_threads_ = 1;
 }
 
-InferenceHelperMnn::~InferenceHelperMnn()
-{
-}
+InferenceHelperMnn::~InferenceHelperMnn() {}
 
-int32_t InferenceHelperMnn::SetNumThreads(const int32_t num_threads)
-{
+int32_t InferenceHelperMnn::SetNumThreads(const int32_t num_threads) {
     num_threads_ = num_threads;
     return kRetOk;
 }
 
-int32_t InferenceHelperMnn::SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops)
-{
+int32_t InferenceHelperMnn::SetCustomOps(const std::vector<std::pair<const char*, const void*>>& custom_ops) {
     PRINT("[WARNING] This method is not supported\n");
     return kRetOk;
 }
 
-int32_t InferenceHelperMnn::ParameterInitialization(std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list) {
+int32_t InferenceHelperMnn::ParameterInitialization(std::vector<InputTensorInfo>& input_tensor_info_list,
+                                                    std::vector<OutputTensorInfo>& output_tensor_info_list) {
     /* Check tensor info fits the info from model */
     for (auto& input_tensor_info : input_tensor_info_list) {
         auto input_tensor = net_->getSessionInput(session_, input_tensor_info.name.c_str());
         if (input_tensor == nullptr) {
             PRINT_E("Invalid input name (%s)\n", input_tensor_info.name.c_str());
-//            LOGD("Invalid input name (%s)\n", input_tensor_info.name.c_str());
+            // LOGD("Invalid input name (%s)\n", input_tensor_info.name.c_str());
             return kRetErr;
         }
         if ((input_tensor->getType().code == halide_type_float) && (input_tensor_info.tensor_type == TensorInfo::kTensorTypeFp32)) {
@@ -81,14 +77,16 @@ int32_t InferenceHelperMnn::ParameterInitialization(std::vector
         }
         if ((input_tensor->channel() != -1) && (input_tensor->height() != -1) && (input_tensor->width() != -1)) {
             if (input_tensor_info.GetChannel() != -1) {
-                if ((input_tensor->channel() == input_tensor_info.GetChannel()) && (input_tensor->height() == input_tensor_info.GetHeight()) && (input_tensor->width() == input_tensor_info.GetWidth())) {
+                if ((input_tensor->channel() == input_tensor_info.GetChannel()) && (input_tensor->height() == input_tensor_info.GetHeight()) &&
+                    (input_tensor->width() == input_tensor_info.GetWidth())) {
                     /* OK */
                 } else {
-                    INSPIRE_LOGW("W: %d != %d", input_tensor->width() , input_tensor_info.GetWidth());
-                    INSPIRE_LOGW("H: %d != %d", input_tensor->height() , input_tensor_info.GetHeight());
-                    INSPIRE_LOGW("C: %d != %d", input_tensor->channel() , input_tensor_info.GetChannel());
+                    INSPIRE_LOGW("W: %d != %d", input_tensor->width(), input_tensor_info.GetWidth());
+                    INSPIRE_LOGW("H: %d != %d", input_tensor->height(), input_tensor_info.GetHeight());
+                    INSPIRE_LOGW("C: %d != %d", input_tensor->channel(), input_tensor_info.GetChannel());
                     INSPIRE_LOGW("There may be some risk of input that is not used by model default");
-                    net_->resizeTensor(input_tensor, { 1, input_tensor_info.GetChannel(), input_tensor_info.GetHeight(), input_tensor_info.GetWidth() });
+                    net_->resizeTensor(input_tensor,
+                                       {1, input_tensor_info.GetChannel(), input_tensor_info.GetHeight(), input_tensor_info.GetWidth()});
                     net_->resizeSession(session_);
                     return kRetOk;
                 }
@@ -103,7 +101,7 @@ int32_t InferenceHelperMnn::ParameterInitialization(std::vector
             if (input_tensor_info.GetChannel() != -1) {
                 PRINT("Input tensor size is resized\n");
                 /* In case the input size is not fixed */
-                net_->resizeTensor(input_tensor, { 1, input_tensor_info.GetChannel(), input_tensor_info.GetHeight(), input_tensor_info.GetWidth() });
+                net_->resizeTensor(input_tensor, {1, input_tensor_info.GetChannel(), input_tensor_info.GetHeight(), input_tensor_info.GetWidth()});
                 net_->resizeSession(session_);
                 INSPIRE_LOGE("GO RESIZE");
             } else {
@@ -126,7 +124,6 @@ int32_t InferenceHelperMnn::ParameterInitialization(std::vector
         ConvertNormalizeParameters(input_tensor_info);
     }
 
-
     /* Check if tensor info is set */
     for (const auto& input_tensor_info : input_tensor_info_list) {
         for (const auto& dim : input_tensor_info.tensor_dims) {
             if (dim <= 0) {
                 PRINT_E("Invalid tensor size\n");
                 return kRetErr;
             }
         }
     }
-    //for (const auto& output_tensor_info : output_tensor_info_list) {
-    //    for (const auto& dim : output_tensor_info.tensor_dims) {
-    //        if (dim <= 0) {
-    //            PRINT_E("Invalid tensor size\n");
-    //            return kRetErr;
-    //        }
-    //    }
-    //}
+    // for (const auto& output_tensor_info : output_tensor_info_list) {
+    //     for (const auto& dim : output_tensor_info.tensor_dims) {
+    //         if (dim <= 0) {
+    //             PRINT_E("Invalid tensor size\n");
+    //             return kRetErr;
+    //         }
+    //     }
+    // }
 
     return kRetOk;
 }
 
-int32_t InferenceHelperMnn::Initialize(char* model_buffer, int model_size, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list) {
-// PRINT("-Initialize\n");
-// LOGD("-Initialize");
+int32_t InferenceHelperMnn::Initialize(char* model_buffer, int model_size, std::vector<InputTensorInfo>& input_tensor_info_list,
+                                       std::vector<OutputTensorInfo>& output_tensor_info_list) {
+    // PRINT("-Initialize\n");
+    // LOGD("-Initialize");
     /*** Create network ***/
-// LOG_INFO("init MNN");
-// PRINT_E("init MNN");
+    // LOG_INFO("init MNN");
+    // PRINT_E("init MNN");
     net_.reset(MNN::Interpreter::createFromBuffer(model_buffer, model_size));
     if (!net_) {
         PRINT_E("Failed to load model model buffer\n");
         return kRetErr;
     }
     MNN::ScheduleConfig scheduleConfig;
-    scheduleConfig.numThread = num_threads_; // it seems, setting 1 has better performance on Android
+    scheduleConfig.numThread = num_threads_;  // it seems, setting 1 has better performance on Android
     MNN::BackendConfig bnconfig;
     bnconfig.power = MNN::BackendConfig::Power_High;
-    bnconfig.precision = MNN::BackendConfig::Precision_High;
+    bnconfig.precision = MNN::BackendConfig::Precision_Normal;
     if (special_backend_ == kMnnCuda) {
         INSPIRE_LOGD("Enable CUDA");
         scheduleConfig.type = MNN_FORWARD_CUDA;
@@ -175,10 +173,10 @@ int32_t InferenceHelperMnn::Initialize(char* model_buffer, int model_size, std::
     scheduleConfig.backendConfig = &bnconfig;
 
     session_ = net_->createSession(scheduleConfig);
-// LOG_INFO("-INPUT: {}", net_->getSessionInputAll(session_).size());
-// PRINT("-INPUT: %lu", net_->getSessionInputAll(session_).size());
-// LOGD("-INPUT: %lu", net_->getSessionInputAll(session_).size());
-    for (auto &item: net_->getSessionInputAll(session_)) {
+    // LOG_INFO("-INPUT: {}", net_->getSessionInputAll(session_).size());
+    // PRINT("-INPUT: %lu", net_->getSessionInputAll(session_).size());
+    // LOGD("-INPUT: %lu", net_->getSessionInputAll(session_).size());
+    for (auto& item : net_->getSessionInputAll(session_)) {
         input_names_.push_back(item.first.c_str());
     }
     if (!session_) {
@@ -189,9 +187,9 @@ int32_t InferenceHelperMnn::Initialize(char* model_buffer, int model_size, std::
     return ParameterInitialization(input_tensor_info_list, output_tensor_info_list);
 }
 
-int32_t InferenceHelperMnn::Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list, std::vector<OutputTensorInfo>& output_tensor_info_list)
-{
-// LOG_INFO("init MNN");
+int32_t InferenceHelperMnn::Initialize(const std::string& model_filename, std::vector<InputTensorInfo>& input_tensor_info_list,
+                                       std::vector<OutputTensorInfo>& output_tensor_info_list) {
+    // LOG_INFO("init MNN");
     /*** Create network ***/
     net_.reset(MNN::Interpreter::createFromFile(model_filename.c_str()));
     if (!net_) {
@@ -201,7 +199,7 @@ int32_t InferenceHelperMnn::Initialize(const std::string& model_filename, std::v
 
     MNN::ScheduleConfig scheduleConfig;
     scheduleConfig.type = MNN_FORWARD_CPU;
-    scheduleConfig.numThread = num_threads_; // it seems, setting 1 has better performance on Android
+    scheduleConfig.numThread = num_threads_;  // it seems, setting 1 has better performance on Android
     // MNN::BackendConfig bnconfig;
     // bnconfig.power = MNN::BackendConfig::Power_High;
     // bnconfig.precision = MNN::BackendConfig::Precision_Low;
@@ -213,12 +211,9 @@ int32_t InferenceHelperMnn::Initialize(const std::string& model_filename, std::v
     }
 
     return ParameterInitialization(input_tensor_info_list, output_tensor_info_list);
-
 };
 
-
-int32_t InferenceHelperMnn::Finalize(void)
-{
+int32_t InferenceHelperMnn::Finalize(void) {
     net_->releaseSession(session_);
     net_->releaseModel();
     net_.reset();
@@ -226,11 +221,10 @@ int32_t InferenceHelperMnn::Finalize(void)
     return kRetOk;
 }
 
-int32_t InferenceHelperMnn::PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list)
-{
-//    for (auto &item: net_->getSessionInputAll(session_)) {
-//        PRINT("sss: %s", item.first.c_str());
-//    }
+int32_t InferenceHelperMnn::PreProcess(const std::vector<InputTensorInfo>& input_tensor_info_list) {
+    // for (auto &item: net_->getSessionInputAll(session_)) {
+    //     PRINT("sss: %s", item.first.c_str());
+    // }
     for (const auto& input_tensor_info : input_tensor_info_list) {
         auto input_tensor = net_->getSessionInput(session_, input_tensor_info.name.c_str());
         if (input_tensor == nullptr) {
@@ -240,21 +234,22 @@ int32_t InferenceHelperMnn::PreProcess(const std::vector<InputTensorInfo>& input
         }
         if (input_tensor_info.data_type == InputTensorInfo::kDataTypeImage) {
             /* Crop */
-            if ((input_tensor_info.image_info.width != input_tensor_info.image_info.crop_width) || (input_tensor_info.image_info.height != input_tensor_info.image_info.crop_height)) {
+            if ((input_tensor_info.image_info.width != input_tensor_info.image_info.crop_width) ||
+                (input_tensor_info.image_info.height != input_tensor_info.image_info.crop_height)) {
                 PRINT_E("Crop is not supported\n");
                 return kRetErr;
             }
             MNN::CV::ImageProcess::Config image_processconfig;
             /* Convert color type */
-//            LOGD("input_tensor_info.image_info.channel: %d", input_tensor_info.image_info.channel);
-//            LOGD("input_tensor_info.GetChannel(): %d", input_tensor_info.GetChannel());
+            // LOGD("input_tensor_info.image_info.channel: %d", input_tensor_info.image_info.channel);
+            // LOGD("input_tensor_info.GetChannel(): %d", input_tensor_info.GetChannel());
 
             // !!!!!! BUG !!!!!!!!!
-            // When initializing, setting the image channel to 3 and the tensor channel to 1,
-            // and configuring the processing to convert the color image to grayscale may cause some bugs.
-            // For example, the image channel might automatically change to 1.
-            // This issue has not been fully investigated,
+            // When initializing, setting the image channel to 3 and the tensor channel to 1,
+            // and configuring the processing to convert the color image to grayscale may cause some bugs.
+            // For example, the image channel might automatically change to 1.
+            // This issue has not been fully investigated,
             // so it's necessary to manually convert the image to grayscale before input.
             // !!!!!! BUG !!!!!!!!!
@@ -271,7 +266,7 @@ int32_t InferenceHelperMnn::PreProcess(const std::vector<InputTensorInfo>& input
             } else if ((input_tensor_info.image_info.channel == 3) && (input_tensor_info.GetChannel() == 1)) {
                 image_processconfig.sourceFormat = (input_tensor_info.image_info.is_bgr) ? MNN::CV::BGR : MNN::CV::RGB;
                 image_processconfig.destFormat = MNN::CV::GRAY;
-//                LOGD("2gray");
+                // LOGD("2gray");
             } else if ((input_tensor_info.image_info.channel == 1) && (input_tensor_info.GetChannel() == 3)) {
                 image_processconfig.sourceFormat = MNN::CV::GRAY;
                 image_processconfig.destFormat = MNN::CV::BGR;
@@ -283,19 +278,22 @@ int32_t InferenceHelperMnn::PreProcess(const std::vector<InputTensorInfo>& input
             /* Normalize image */
             std::memcpy(image_processconfig.mean, input_tensor_info.normalize.mean, sizeof(image_processconfig.mean));
             std::memcpy(image_processconfig.normal, input_tensor_info.normalize.norm, sizeof(image_processconfig.normal));
-
+
             /* Resize image */
             image_processconfig.filterType = MNN::CV::BILINEAR;
             MNN::CV::Matrix trans;
-            trans.setScale(static_cast<float>(input_tensor_info.image_info.crop_width) / input_tensor_info.GetWidth(), static_cast<float>(input_tensor_info.image_info.crop_height) / input_tensor_info.GetHeight());
+            trans.setScale(static_cast<float>(input_tensor_info.image_info.crop_width) / input_tensor_info.GetWidth(),
+                           static_cast<float>(input_tensor_info.image_info.crop_height) / input_tensor_info.GetHeight());
 
             /* Do pre-process */
             std::shared_ptr<MNN::CV::ImageProcess> pretreat(MNN::CV::ImageProcess::create(image_processconfig));
             pretreat->setMatrix(trans);
-//            LOGD("k1");
-            pretreat->convert(static_cast<uint8_t*>(input_tensor_info.data), input_tensor_info.image_info.crop_width, input_tensor_info.image_info.crop_height, 0, input_tensor);
+            // LOGD("k1");
+            pretreat->convert(static_cast<uint8_t*>(input_tensor_info.data), input_tensor_info.image_info.crop_width,
+                              input_tensor_info.image_info.crop_height, 0, input_tensor);
 
-        } else if ( (input_tensor_info.data_type == InputTensorInfo::kDataTypeBlobNhwc) || (input_tensor_info.data_type == InputTensorInfo::kDataTypeBlobNchw) ) {
+        } else if ((input_tensor_info.data_type == InputTensorInfo::kDataTypeBlobNhwc) ||
+                   (input_tensor_info.data_type == InputTensorInfo::kDataTypeBlobNchw)) {
             std::unique_ptr<MNN::Tensor> tensor;
             if (input_tensor_info.data_type == InputTensorInfo::kDataTypeBlobNhwc) {
                 tensor.reset(new MNN::Tensor(input_tensor, MNN::Tensor::TENSORFLOW));
@@ -320,8 +318,7 @@ int32_t InferenceHelperMnn::PreProcess(const std::vector<InputTensorInfo>& input
     return kRetOk;
 }
 
-int32_t InferenceHelperMnn::Process(std::vector<OutputTensorInfo>& output_tensor_info_list)
-{
+int32_t InferenceHelperMnn::Process(std::vector<OutputTensorInfo>& output_tensor_info_list) {
     net_->runSession(session_);
 
     out_mat_list_.clear();
@@ -346,13 +343,13 @@ int32_t InferenceHelperMnn::Process(std::vector<OutputTensorInfo>& output_tensor
             PRINT_E("Unexpected data type\n");
             return kRetErr;
         }
-
+
         output_tensor_info.tensor_dims.clear();
         for (int32_t dim = 0; dim < outputUser->dimensions(); dim++) {
             output_tensor_info.tensor_dims.push_back(outputUser->length(dim));
         }
 
-        out_mat_list_.push_back(std::move(outputUser));     // store data in member variable so that data keep exist
+        out_mat_list_.push_back(std::move(outputUser));  // store data in a member variable so that it stays alive
     }
 
     return kRetOk;
@@ -365,7 +362,7 @@ std::vector<std::string> InferenceHelperMnn::GetInputNames() {
 int32_t InferenceHelperMnn::ResizeInput(const std::vector<InputTensorInfo>& input_tensor_info_list) {
     for (const auto& input_tensor_info : input_tensor_info_list) {
         auto input_tensor = net_->getSessionInput(session_, input_tensor_info.name.c_str());
-        net_->resizeTensor(input_tensor, { 1, input_tensor_info.GetChannel(), input_tensor_info.GetHeight(), input_tensor_info.GetWidth() });
+        net_->resizeTensor(input_tensor, {1, input_tensor_info.GetChannel(), input_tensor_info.GetHeight(), input_tensor_info.GetWidth()});
         net_->resizeSession(session_);
     }
     return 0;
diff --git a/cpp/sample/CMakeLists.txt b/cpp/sample/CMakeLists.txt
index e9d23ec0..3bb0084c 100644
--- a/cpp/sample/CMakeLists.txt
+++ b/cpp/sample/CMakeLists.txt
@@ -37,6 +37,11 @@ set_target_properties(FaceTrackSample PROPERTIES
     RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
 )
 
+add_executable(FaceTrackBenchmarkSample api/sample_face_track_benchmark.cpp)
+target_link_libraries(FaceTrackBenchmarkSample InspireFace ${ext})
+set_target_properties(FaceTrackBenchmarkSample PROPERTIES
+    RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/sample/"
+)
 
 # Examples of face recognition
 add_executable(FaceComparisonSample api/sample_face_comparison.cpp)
diff --git a/cpp/sample/api/sample_face_track_benchmark.cpp b/cpp/sample/api/sample_face_track_benchmark.cpp
new file mode 100644
index 00000000..84955470
--- /dev/null
+++ b/cpp/sample/api/sample_face_track_benchmark.cpp
@@ -0,0 +1,130 @@
+/**
+ * Created by Jingyu Yan
+ * @date 2024-10-01
+ */
+#include <iostream>
+#include <inspireface.h>
+
+int main(int argc, char* argv[]) {
+    // Check whether the number of parameters is correct
+    if (argc < 3 || argc > 4) {
+        HFLogPrint(HF_LOG_ERROR, "Usage: %s <pack_path> <source_path> [rotation]", argv[0]);
+        return 1;
+    }
+
+    auto packPath = argv[1];
+    auto sourcePath = argv[2];
+    int rotation = 0;
+
+    // If rotation is provided, check and set the value
+    if (argc == 4) {
+        rotation = std::atoi(argv[3]);
+        if (rotation != 0 && rotation != 90 && rotation != 180 && rotation != 270) {
+            HFLogPrint(HF_LOG_ERROR, "Invalid rotation value. Allowed values are 0, 90, 180, 270.");
+            return 1;
+        }
+    }
+    HFRotation rotation_enum;
+    // Set rotation based on input parameter
+    switch (rotation) {
+        case 90:
+            rotation_enum = HF_CAMERA_ROTATION_90;
+            break;
+        case 180:
+            rotation_enum = HF_CAMERA_ROTATION_180;
+            break;
+        case 270:
+            rotation_enum = HF_CAMERA_ROTATION_270;
+            break;
+        case 0:
+        default:
+            rotation_enum = HF_CAMERA_ROTATION_0;
+            break;
+    }
+
+    HFLogPrint(HF_LOG_INFO, "Pack file Path: %s", packPath);
+    HFLogPrint(HF_LOG_INFO, "Source file Path: %s", sourcePath);
+    HFLogPrint(HF_LOG_INFO, "Rotation: %d", rotation);
+
+    HFSetLogLevel(HF_LOG_INFO);
+
+    HResult ret;
+    // The resource file must be loaded before it can be used
+    ret = HFLaunchInspireFace(packPath);
+    if (ret != HSUCCEED) {
+        HFLogPrint(HF_LOG_ERROR, "Load Resource error: %d", ret);
+        return ret;
+    }
+
+    // Enable the functions in the pipeline: mask detection, liveness detection, and face quality
+    // detection
+    HOption option = HF_ENABLE_QUALITY | HF_ENABLE_MASK_DETECT | HF_ENABLE_LIVENESS | HF_ENABLE_DETECT_MODE_LANDMARK;
+    // For non-video or frame-sequence input, use image mode, which always runs face detection
+    // without tracking
+    HFDetectMode detMode = HF_DETECT_MODE_ALWAYS_DETECT;
+    // Maximum number of faces detected
+    HInt32 maxDetectNum = 20;
+    // Face detection image input level
+    HInt32 detectPixelLevel = 160;
+    // Handle of the current face SDK algorithm context
+    HFSession session = {0};
+    ret = HFCreateInspireFaceSessionOptional(option, detMode, maxDetectNum, detectPixelLevel, -1, &session);
+    if (ret != HSUCCEED) {
+        HFLogPrint(HF_LOG_ERROR, "Create FaceContext error: %d", ret);
+        return ret;
+    }
+
+    HFSessionSetTrackPreviewSize(session, detectPixelLevel);
+    HFSessionSetFilterMinimumFacePixelSize(session, 4);
+
+    // Load an image
+    HFImageBitmap image;
+    ret = HFCreateImageBitmapFromFilePath(sourcePath, 3, &image);
+    if (ret != HSUCCEED) {
+        HFLogPrint(HF_LOG_ERROR, "The source is not a valid image or could not be read.");
+        return ret;
+    }
+    // Prepare an image parameter structure for configuration
+    HFImageStream imageHandle = {0};
+    ret = HFCreateImageStreamFromImageBitmap(image, rotation_enum, &imageHandle);
+    if (ret != HSUCCEED) {
+        HFLogPrint(HF_LOG_ERROR, "Create ImageStream error: %d", ret);
+        return ret;
+    }
+
+    int loop = 100;
+
+    // Enable track cost-time measurement
+    HFSessionSetEnableTrackCostSpend(session, true);
+
+    // Execute HFExecuteFaceTrack to capture face information in the image
+    HFMultipleFaceData multipleFaceData = {0};
+    for (int i = 0; i < loop; i++) {
+        ret = HFExecuteFaceTrack(session, imageHandle, &multipleFaceData);
+        if (ret != HSUCCEED) {
+            HFLogPrint(HF_LOG_ERROR, "Execute HFExecuteFaceTrack error: %d", ret);
+            return ret;
+        }
+    }
+    HFLogPrint(HF_LOG_INFO, "Number of Detections: %d", multipleFaceData.detectedNum);
+    HFSessionPrintTrackCostSpend(session);
+
+    ret = HFReleaseImageStream(imageHandle);
+    if (ret != HSUCCEED) {
+        HFLogPrint(HF_LOG_ERROR, "Release image stream error: %d", ret);
+    }
+    // The memory must be freed at the end of the program
+    ret = HFReleaseInspireFaceSession(session);
+    if (ret != HSUCCEED) {
+        HFLogPrint(HF_LOG_ERROR, "Release session error: %d", ret);
+        return ret;
+    }
+
+    ret = HFReleaseImageBitmap(image);
+    if (ret != HSUCCEED) {
+        HFLogPrint(HF_LOG_ERROR, "Release image bitmap error: %d", ret);
+        return ret;
+    }
+
+    return 0;
+}
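
Usage note: the calls added by this patch compose into a simple benchmark pattern. A minimal sketch follows, assuming a session and image stream have already been created as in the sample above (error handling omitted; note that HFSessionPrintTrackCostSpend only logs while measurement is enabled):

    /* Enable cost-time measurement; enabling also resets any previous statistics */
    HFSessionSetEnableTrackCostSpend(session, true);

    /* Each HFExecuteFaceTrack call is now timed inside the session */
    HFMultipleFaceData faces = {0};
    for (int i = 0; i < 100; i++) {
        HFExecuteFaceTrack(session, imageHandle, &faces);
    }

    /* Emit the accumulated report through the SDK logger */
    HFSessionPrintTrackCostSpend(session);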