From 609885e085b178e8b406999021480cf9607a2c70 Mon Sep 17 00:00:00 2001
From: Lars Ludvigsen
Date: Mon, 1 Aug 2022 19:16:39 +0000
Subject: [PATCH 01/12] Exposing device selection as parameter

---
 README.md | 8 ++++++++
 .../include/inference_pkg/inference_base.hpp | 4 +++-
 .../include/inference_pkg/intel_inference_eng.hpp | 3 ++-
 inference_pkg/src/inference_node.cpp | 14 ++++++++++++--
 inference_pkg/src/intel_inference_eng.cpp | 5 +++--
 5 files changed, 28 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index 9058b1e..b8ebb7c 100644
--- a/README.md
+++ b/README.md
@@ -134,6 +134,14 @@ The `inference_pkg_launch.py`, included in this package, provides an example dem
 |`load_model`|`LoadModelSrv`|Service that is responsible for setting pre-processing algorithm and inference tasks for the specific type of model loaded.|
 |`inference_state`|`InferenceStateSrv`|Service that is responsible for starting and stopping inference tasks.|
+
+### Parameters
+
+| Parameter name | Description |
+| ---------------- | ----------- |
+| `device` | String that is either `CPU`, `GPU` or `MYRIAD`. Default is `CPU`. `MYRIAD` is the Intel Neural Compute Stick 2. |
+
+
 ## Resources
 * [Getting started with AWS DeepRacer OpenSource](https://github.com/aws-deepracer/aws-deepracer-launcher/blob/main/getting-started.md)

diff --git a/inference_pkg/include/inference_pkg/inference_base.hpp b/inference_pkg/include/inference_pkg/inference_base.hpp
index 0dd4208..6cddcb1 100644
--- a/inference_pkg/include/inference_pkg/inference_base.hpp
+++ b/inference_pkg/include/inference_pkg/inference_base.hpp
@@ -32,8 +32,10 @@ namespace InferTask {
 /// @returns True if model loaded successfully, false otherwise
 /// @param artifactPath Path to the model artifact.
 /// @param imgProcess Pointer to the image processing algorithm
+ /// @param device Reference to the compute device (CPU, GPU, MYRIAD)
 virtual bool loadModel(const char* artifactPath,
- std::shared_ptr imgProcess) = 0;
+ std::shared_ptr imgProcess,
+ std::string device) = 0;
 /// Starts the inference task until stopped.
 virtual void startInference() = 0;
 /// Stops the inference task if running.

diff --git a/inference_pkg/include/inference_pkg/intel_inference_eng.hpp b/inference_pkg/include/inference_pkg/intel_inference_eng.hpp
index 47738f0..4295950 100644
--- a/inference_pkg/include/inference_pkg/intel_inference_eng.hpp
+++ b/inference_pkg/include/inference_pkg/intel_inference_eng.hpp
@@ -34,7 +34,8 @@ namespace IntelInferenceEngine {
 RLInferenceModel(std::shared_ptr inferenceNodePtr, const std::string &sensorSubName);
 virtual ~RLInferenceModel();
 virtual bool loadModel(const char* artifactPath,
- std::shared_ptr imgProcess) override;
+ std::shared_ptr imgProcess,
+ std::string device) override;
 virtual void startInference() override;
 virtual void stopInference() override;
 /// Callback method to retrieve sensor data.

diff --git a/inference_pkg/src/inference_node.cpp b/inference_pkg/src/inference_node.cpp
index 56e71a5..12628fd 100644
--- a/inference_pkg/src/inference_node.cpp
+++ b/inference_pkg/src/inference_node.cpp
@@ -41,11 +41,18 @@ namespace InferTask {
 /// Class that will manage the inference task. In particular it will start and stop the
 /// inference tasks and feed the inference task the sensor data.
 /// @param nodeName Reference to the string containing name of the node.
+ /// @param device Reference to the compute device (CPU, GPU, MYRIAD) public: InferenceNodeMgr(const std::string & nodeName) - : Node(nodeName) + : Node(nodeName), + deviceName_("CPU") { RCLCPP_INFO(this->get_logger(), "%s started", nodeName.c_str()); + + this->declare_parameter("device", deviceName_); + // Device name; OpenVINO supports CPU, GPU and MYRIAD + deviceName_ = this->get_parameter("device").as_string(); + loadModelServiceCbGrp_ = this->create_callback_group(rclcpp::callback_group::CallbackGroupType::MutuallyExclusive); loadModelService_ = this->create_service("load_model", std::bind(&InferTask::InferenceNodeMgr::LoadModelHdl, @@ -129,7 +136,7 @@ namespace InferTask { RCLCPP_ERROR(this->get_logger(), "Unknown inference task"); return; } - itInferTask->second->loadModel(req->artifact_path.c_str(), itPreProcess->second); + itInferTask->second->loadModel(req->artifact_path.c_str(), itPreProcess->second, deviceName_); res->error = 0; } } @@ -149,6 +156,9 @@ namespace InferTask { /// List of available pre-processing algorithms. std::unordered_map> preProcessList_; /// Reference to the node handler. + + /// Compute device type. + std::string deviceName_; }; } diff --git a/inference_pkg/src/intel_inference_eng.cpp b/inference_pkg/src/intel_inference_eng.cpp index e9e2c40..6c65277 100644 --- a/inference_pkg/src/intel_inference_eng.cpp +++ b/inference_pkg/src/intel_inference_eng.cpp @@ -201,7 +201,8 @@ namespace IntelInferenceEngine { } bool RLInferenceModel::loadModel(const char* artifactPath, - std::shared_ptr imgProcess) { + std::shared_ptr imgProcess, + std::string device) { if (doInference_) { RCLCPP_ERROR(inferenceNode->get_logger(), "Please stop inference prior to loading a model"); return false; @@ -214,7 +215,7 @@ namespace IntelInferenceEngine { imgProcess_ = imgProcess; // Load the model try { - inferRequest_ = setMultiHeadModel(artifactPath, "CPU", core_, inputNamesArr_, + inferRequest_ = setMultiHeadModel(artifactPath, device, core_, inputNamesArr_, outputName_, InferenceEngine::Precision::FP32, InferenceEngine::Precision::FP32, inferenceNode); for(size_t i = 0; i != inputNamesArr_.size(); ++i) { From 1cfb07e87e9e2fb533bda247a10ec2c47a69ae87 Mon Sep 17 00:00:00 2001 From: Lars Ludvigsen Date: Wed, 31 Aug 2022 17:00:26 -0400 Subject: [PATCH 02/12] Compressed image --- .../include/inference_pkg/image_process.hpp | 17 +++++++++-------- inference_pkg/src/image_process.cpp | 10 +++++----- inference_pkg/src/intel_inference_eng.cpp | 6 +++--- 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/inference_pkg/include/inference_pkg/image_process.hpp b/inference_pkg/include/inference_pkg/image_process.hpp index 33bce8d..b199b4c 100644 --- a/inference_pkg/include/inference_pkg/image_process.hpp +++ b/inference_pkg/include/inference_pkg/image_process.hpp @@ -19,6 +19,7 @@ #include "rclcpp/rclcpp.hpp" #include "sensor_msgs/msg/image.hpp" +#include "sensor_msgs/msg/compressed_image.hpp" #include "cv_bridge/cv_bridge.h" #include @@ -33,9 +34,9 @@ namespace InferTask { /// @param frameData ROS message containing the image data. 
/// @param retImg Open CV Mat object that will be used to store the post processed image /// @param params Hash map containing relevant pre-processing parameters - virtual void processImage(const sensor_msgs::msg::Image &frameData, cv::Mat& retImg, + virtual void processImage(const sensor_msgs::msg::CompressedImage &frameData, cv::Mat& retImg, const std::unordered_map ¶ms) = 0; - virtual void processImageVec(const std::vector &frameDataArr, cv::Mat& retImg, + virtual void processImageVec(const std::vector &frameDataArr, cv::Mat& retImg, const std::unordered_map ¶ms) = 0; /// Resets the image processing algorithms data if any. virtual void reset() = 0; @@ -49,9 +50,9 @@ namespace InferTask { public: RGB() = default; virtual ~RGB() = default; - virtual void processImage(const sensor_msgs::msg::Image &frameData, cv::Mat& retImg, + virtual void processImage(const sensor_msgs::msg::CompressedImage &frameData, cv::Mat& retImg, const std::unordered_map ¶ms) override; - virtual void processImageVec(const std::vector &frameDataArr, cv::Mat& retImg, + virtual void processImageVec(const std::vector &frameDataArr, cv::Mat& retImg, const std::unordered_map ¶ms) override {(void)frameDataArr;(void)retImg;(void)params;} virtual void reset() override {} virtual const std::string getEncode() const; @@ -67,9 +68,9 @@ namespace InferTask { /// @param isMask True if background masking should be performed on the image. Grey(bool isThreshold, bool isMask); virtual ~Grey() = default; - virtual void processImage(const sensor_msgs::msg::Image &frameData, cv::Mat& retImg, + virtual void processImage(const sensor_msgs::msg::CompressedImage &frameData, cv::Mat& retImg, const std::unordered_map ¶ms) override; - virtual void processImageVec(const std::vector &frameDataArr, cv::Mat& retImg, + virtual void processImageVec(const std::vector &frameDataArr, cv::Mat& retImg, const std::unordered_map ¶ms); virtual void reset() override; virtual const std::string getEncode() const; @@ -91,9 +92,9 @@ namespace InferTask { public: GreyDiff() = default; virtual ~GreyDiff() = default; - virtual void processImage(const sensor_msgs::msg::Image &frameData, cv::Mat& retImg, + virtual void processImage(const sensor_msgs::msg::CompressedImage &frameData, cv::Mat& retImg, const std::unordered_map ¶ms) override; - virtual void processImageVec(const std::vector &frameDataArr, cv::Mat& retImg, + virtual void processImageVec(const std::vector &frameDataArr, cv::Mat& retImg, const std::unordered_map ¶ms) override {(void)frameDataArr;(void)retImg;(void)params;} virtual void reset() override; virtual const std::string getEncode() const; diff --git a/inference_pkg/src/image_process.cpp b/inference_pkg/src/image_process.cpp index 814c8a7..74e4e9d 100644 --- a/inference_pkg/src/image_process.cpp +++ b/inference_pkg/src/image_process.cpp @@ -29,7 +29,7 @@ namespace { /// @param frameData ROS image message containing the image data. /// @param retImg Reference to CV object to be populated the with resized image. /// @param params Hash map containing resize information. 
- bool cvtToCVObjResize (const sensor_msgs::msg::Image &frameData, cv::Mat &retImg, + bool cvtToCVObjResize (const sensor_msgs::msg::CompressedImage &frameData, cv::Mat &retImg, const std::unordered_map ¶ms) { cv_bridge::CvImagePtr cvPtr; @@ -121,7 +121,7 @@ namespace { } namespace InferTask { - void RGB::processImage(const sensor_msgs::msg::Image &frameData, cv::Mat &retImg, + void RGB::processImage(const sensor_msgs::msg::CompressedImage &frameData, cv::Mat &retImg, const std::unordered_map ¶ms) { cvtToCVObjResize(frameData, retImg, params); } @@ -137,7 +137,7 @@ namespace InferTask { } - void Grey::processImage(const sensor_msgs::msg::Image &frameData, cv::Mat &retImg, + void Grey::processImage(const sensor_msgs::msg::CompressedImage &frameData, cv::Mat &retImg, const std::unordered_map ¶ms) { cv::Mat currImg; if (cvtToCVObjResize(frameData, currImg, params)) { @@ -160,7 +160,7 @@ namespace InferTask { } } - void Grey::processImageVec(const std::vector &frameDataArr, cv::Mat &retImg, + void Grey::processImageVec(const std::vector &frameDataArr, cv::Mat &retImg, const std::unordered_map ¶ms) { // Left camera image is sent as the top image and the right camera image is sent as second in the vector. // Stack operation replaces the beginning values as we loop through and hence we loop in decreasing order @@ -188,7 +188,7 @@ namespace InferTask { return sensor_msgs::image_encodings::MONO8; } - void GreyDiff::processImage(const sensor_msgs::msg::Image &frameData, cv::Mat &retImg, + void GreyDiff::processImage(const sensor_msgs::msg::CompressedImage &frameData, cv::Mat &retImg, const std::unordered_map ¶ms) { (void)retImg; cv::Mat currImg; diff --git a/inference_pkg/src/intel_inference_eng.cpp b/inference_pkg/src/intel_inference_eng.cpp index 6c65277..1abf108 100644 --- a/inference_pkg/src/intel_inference_eng.cpp +++ b/inference_pkg/src/intel_inference_eng.cpp @@ -103,7 +103,7 @@ namespace { template void load1DImg(V *inputPtr, cv::Mat &retImg, std::shared_ptr imgProcessPtr, - const sensor_msgs::msg::Image &imgData, + const sensor_msgs::msg::CompressedImage &imgData, const std::unordered_map ¶ms) { imgProcessPtr->processImage(imgData, retImg, params); if (retImg.empty()) { @@ -127,7 +127,7 @@ namespace { template void loadStackImg(V *inputPtr, cv::Mat &retImg, std::shared_ptr imgProcessPtr, - const sensor_msgs::msg::Image &imgData, + const sensor_msgs::msg::CompressedImage &imgData, const std::unordered_map ¶ms) { imgProcessPtr->processImage(imgData, retImg, params); if (retImg.empty()) { @@ -150,7 +150,7 @@ namespace { template void loadStereoImg(V *inputPtr, cv::Mat &retImg, std::shared_ptr imgProcessPtr, - const std::vector &imgDataArr, + const std::vector &imgDataArr, const std::unordered_map ¶ms) { imgProcessPtr->processImageVec(imgDataArr, retImg, params); From 817505197b70a5e43704aea80417921bf1ac497c Mon Sep 17 00:00:00 2001 From: Lars Ludvigsen Date: Sun, 25 Feb 2024 19:35:38 +0000 Subject: [PATCH 03/12] First attempt at node --- inference_pkg/CMakeLists.txt | 22 +- .../inference_pkg/tflite_inference_eng.hpp | 77 +++++ inference_pkg/src/inference_node.cpp | 4 +- inference_pkg/src/tflite_inference_eng.cpp | 304 ++++++++++++++++++ 4 files changed, 395 insertions(+), 12 deletions(-) create mode 100644 inference_pkg/include/inference_pkg/tflite_inference_eng.hpp create mode 100644 inference_pkg/src/tflite_inference_eng.cpp diff --git a/inference_pkg/CMakeLists.txt b/inference_pkg/CMakeLists.txt index 72aca35..92163d5 100644 --- a/inference_pkg/CMakeLists.txt +++ 
b/inference_pkg/CMakeLists.txt @@ -11,9 +11,13 @@ if(NOT CMAKE_CXX_STANDARD) set(CMAKE_CXX_STANDARD 17) endif() -if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") - add_compile_options(-Wall -Wextra -Wpedantic) -endif() +set(TENSORFLOW_SOURCE_DIR ${CMAKE_SOURCE_DIR}/../../../deps/tensorflow) + +add_subdirectory( + "${TENSORFLOW_SOURCE_DIR}/tensorflow/lite" + "${CMAKE_CURRENT_BINARY_DIR}/tensorflow-lite" + EXCLUDE_FROM_ALL +) # find dependencies find_package(ament_cmake REQUIRED) @@ -23,8 +27,6 @@ find_package(image_transport REQUIRED) find_package(cv_bridge REQUIRED) find_package(sensor_msgs REQUIRED) find_package(std_msgs REQUIRED) -find_package(ngraph REQUIRED) -find_package(InferenceEngine REQUIRED) find_package(OpenCV 4.2 QUIET COMPONENTS opencv_core @@ -44,22 +46,22 @@ endif() add_executable(inference_node src/inference_node.cpp - src/intel_inference_eng.cpp + src/tflite_inference_eng.cpp src/image_process.cpp ) target_include_directories(inference_node PRIVATE include ${OpenCV_INCLUDE_DIRS} - ${InferenceEngine_INCLUDE_DIRS} + ${CMAKE_CURRENT_BINARY_DIR}/flatbuffers/include + $ ) target_link_libraries(inference_node -lm -ldl ${OpenCV_LIBRARIES} - ${InferenceEngine_LIBRARIES} - ${NGRAPH_LIBRARIES}) + tensorflow-lite) -ament_target_dependencies(inference_node rclcpp deepracer_interfaces_pkg sensor_msgs std_msgs cv_bridge image_transport OpenCV InferenceEngine ngraph) +ament_target_dependencies(inference_node rclcpp deepracer_interfaces_pkg sensor_msgs std_msgs cv_bridge image_transport OpenCV) install(TARGETS inference_node diff --git a/inference_pkg/include/inference_pkg/tflite_inference_eng.hpp b/inference_pkg/include/inference_pkg/tflite_inference_eng.hpp new file mode 100644 index 0000000..cc2d82e --- /dev/null +++ b/inference_pkg/include/inference_pkg/tflite_inference_eng.hpp @@ -0,0 +1,77 @@ +/////////////////////////////////////////////////////////////////////////////////// +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // +// // +// Licensed under the Apache License, Version 2.0 (the "License"). // +// You may not use this file except in compliance with the License. // +// You may obtain a copy of the License at // +// // +// http://www.apache.org/licenses/LICENSE-2.0 // +// // +// Unless required by applicable law or agreed to in writing, software // +// distributed under the License is distributed on an "AS IS" BASIS, // +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // +// See the License for the specific language governing permissions and // +// limitations under the License. // +/////////////////////////////////////////////////////////////////////////////////// + +#ifndef TFLITE_INFERENCE_ENG_HPP +#define TFLITE_INFERENCE_ENG_HPP + +#include "inference_pkg/inference_base.hpp" +#include "tensorflow/lite/interpreter.h" +#include "tensorflow/lite/kernels/register.h" +#include "tensorflow/lite/model.h" +#include "deepracer_interfaces_pkg/msg/evo_sensor_msg.hpp" +#include "deepracer_interfaces_pkg/msg/infer_results_array.hpp" +#include + +namespace TFLiteInferenceEngine { + class RLInferenceModel : public InferTask::InferenceBase + { + /// Concrete inference task class for running reinforcement learning models + /// on the GPU. + public: + /// @param node_name Name of the node to be created. + /// @param subName Name of the topic to subscribe to for sensor data. 
+ RLInferenceModel(std::shared_ptr inferenceNodePtr, const std::string &sensorSubName); + virtual ~RLInferenceModel(); + virtual bool loadModel(const char* artifactPath, + std::shared_ptr imgProcess, + std::string device) override; + virtual void startInference() override; + virtual void stopInference() override; + /// Callback method to retrieve sensor data. + /// @param msg Message returned by the ROS messaging system. + void sensorCB(const deepracer_interfaces_pkg::msg::EvoSensorMsg::SharedPtr msg); + + private: + /// Inference node object + std::shared_ptr inferenceNode; + /// ROS subscriber object to the desired sensor topic. + rclcpp::Subscription::SharedPtr sensorSub_; + /// ROS publisher object to the desired topic. + rclcpp::Publisher::SharedPtr resultPub_; + /// Pointer to image processing algorithm. + std::shared_ptr imgProcess_; + /// Inference state variable. + std::atomic doInference_; + /// Neural network Inference engine core object. + std::unique_ptr model_; + /// Inference request object + std::unique_ptr interpreter_; + /// Vector of hash map that stores all relevant pre-processing parameters for each input head. + std::vector> paramsArr_; + /// Vector of names of the input heads + std::vector inputNamesArr_; + /// Name of the output layer + std::string outputName_; + std::vector> inputDimsArr_; + std::vector> outputDimsArr_; + std::vector inputSizesArr_; + std::vector outputSizes_; + std::vector input_tensors_; + std::vector output_tensors_; + + }; +} +#endif \ No newline at end of file diff --git a/inference_pkg/src/inference_node.cpp b/inference_pkg/src/inference_node.cpp index 12628fd..37ff3e8 100644 --- a/inference_pkg/src/inference_node.cpp +++ b/inference_pkg/src/inference_node.cpp @@ -14,7 +14,7 @@ // limitations under the License. // /////////////////////////////////////////////////////////////////////////////////// -#include "inference_pkg/intel_inference_eng.hpp" +#include "inference_pkg/tflite_inference_eng.hpp" #include "deepracer_interfaces_pkg/srv/inference_state_srv.hpp" #include "deepracer_interfaces_pkg/srv/load_model_srv.hpp" @@ -126,7 +126,7 @@ namespace InferTask { if (itInferTask != taskList_.end() && itPreProcess != preProcessList_.end()) { switch(req->task_type) { case rlTask: - itInferTask->second.reset(new IntelInferenceEngine::RLInferenceModel(this->shared_from_this(), "/sensor_fusion_pkg/sensor_msg")); + itInferTask->second.reset(new TFLiteInferenceEngine::RLInferenceModel(this->shared_from_this(), "/sensor_fusion_pkg/sensor_msg")); break; case objDetectTask: //! TODO add onject detection when class is implemented. diff --git a/inference_pkg/src/tflite_inference_eng.cpp b/inference_pkg/src/tflite_inference_eng.cpp new file mode 100644 index 0000000..ba336bb --- /dev/null +++ b/inference_pkg/src/tflite_inference_eng.cpp @@ -0,0 +1,304 @@ +/////////////////////////////////////////////////////////////////////////////////// +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // +// // +// Licensed under the Apache License, Version 2.0 (the "License"). // +// You may not use this file except in compliance with the License. // +// You may obtain a copy of the License at // +// // +// http://www.apache.org/licenses/LICENSE-2.0 // +// // +// Unless required by applicable law or agreed to in writing, software // +// distributed under the License is distributed on an "AS IS" BASIS, // +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// +// See the License for the specific language governing permissions and // +// limitations under the License. // +/////////////////////////////////////////////////////////////////////////////////// + +#include "inference_pkg/tflite_inference_eng.hpp" + +// ROS2 message headers +#include "deepracer_interfaces_pkg/msg/infer_results.hpp" +#include "deepracer_interfaces_pkg/msg/infer_results_array.hpp" + +#include +#define RAD2DEG(x) ((x)*180./M_PI) + +const std::string LIDAR = "LIDAR"; +const std::string STEREO = "STEREO_CAMERAS"; +const std::string FRONT = "FRONT_FACING_CAMERA"; +const std::string OBS = "observation"; +const std::string LEFT = "LEFT_CAMERA"; + + +namespace { + class InferenceExcept : public std::exception + { + /// Simple exception class that is used to send a message to the catch clause. + public: + /// @param msg Message to be logged + InferenceExcept(std::string msg) + : msg_(msg) + { + } + virtual const char* what() const throw() override { + return msg_.c_str(); + } + private: + /// Store message in class so that the what method can dump it when invoked. + const std::string msg_; + }; + + /// Helper method that loads grey images into the inference engine input + /// @param inputPtr Pointer to the input data. + /// @param imgProcessPtr Pointer to the image processing algorithm. + /// @param imgData ROS message containing the image data. + /// @param params Hash map of relevant parameters for image processing. + template void load1DImg(V *inputPtr, + cv::Mat &retImg, + std::shared_ptr imgProcessPtr, + const sensor_msgs::msg::CompressedImage &imgData, + const std::unordered_map ¶ms) { + imgProcessPtr->processImage(imgData, retImg, params); + if (retImg.empty()) { + throw InferenceExcept("No image after pre-process"); + } + int height = retImg.rows; + int width = retImg.cols; + + for (int h = 0; h < height; h++) { + for (int w = 0; w < width; w++) { + inputPtr[h * width + w] = retImg.at(h, w); + } + } + } + + /// Helper method that loads multi channel images into the inference engine input + /// @param inputPtr Pointer to the input data. + /// @param imgProcessPtr Pointer to the image processing algorithm. + /// @param imgData ROS message containing the image data. + /// @param params Hash map of relevant parameters for image processing. + template void loadStackImg(V *inputPtr, + cv::Mat &retImg, + std::shared_ptr imgProcessPtr, + const sensor_msgs::msg::CompressedImage &imgData, + const std::unordered_map ¶ms) { + imgProcessPtr->processImage(imgData, retImg, params); + if (retImg.empty()) { + throw InferenceExcept("No image after-pre process"); + } + const int channelSize = retImg.rows * retImg.cols; + + for (size_t pixelNum = 0; pixelNum < channelSize; ++pixelNum) { + for (size_t ch = 0; ch < retImg.channels(); ++ch) { + inputPtr[(ch*channelSize) + pixelNum] = retImg.at(pixelNum)[ch]; + } + } + } + + /// Helper method that loads multi channel images into the inference engine input + /// @param inputPtr Pointer to the input data. + /// @param imgProcessPtr Pointer to the image processing algorithm. + /// @param imgData ROS message containing the image data. + /// @param params Hash map of relevant parameters for image processing. 
+ template void loadStereoImg(V *inputPtr, + cv::Mat &retImg, + std::shared_ptr imgProcessPtr, + const std::vector &imgDataArr, + const std::unordered_map ¶ms) { + + imgProcessPtr->processImageVec(imgDataArr, retImg, params); + if (retImg.empty()) { + throw InferenceExcept("No image after-pre process"); + } + + const int width = retImg.cols; + const int height = retImg.rows; + const int channel = retImg.channels(); + + for (int c = 0; c < channel; c++) { + for (int h = 0; h < height; h++) { + for (int w = 0; w < width; w++) { + inputPtr[c * width * height + h * width + w] = retImg.at(h, w)[c]; + } + } + } + } + + /// Helper method that loads 1D data into the inference engine input + /// @param inputPtr Pointer to the input data. + /// @param lidarData ROS message containing the lidar data. + void loadLidarData(float *inputPtr, + const std::vector &lidar_data) { + size_t pixelNum = 0; + for(const auto& lidar_value : lidar_data) { + inputPtr[pixelNum] = lidar_value; + ++pixelNum; + } + } +} + +namespace TFLiteInferenceEngine { + RLInferenceModel::RLInferenceModel(std::shared_ptr inferenceNodePtr, const std::string &sensorSubName) + : doInference_(false) + { + inferenceNode = inferenceNodePtr; + RCLCPP_INFO(inferenceNode->get_logger(), "Initializing RL Model"); + RCLCPP_INFO(inferenceNode->get_logger(), "%s", sensorSubName.c_str()); + // Subscribe to the sensor topic and set the call back + sensorSub_ = inferenceNode->create_subscription(sensorSubName, 10, std::bind(&TFLiteInferenceEngine::RLInferenceModel::sensorCB, this, std::placeholders::_1)); + resultPub_ = inferenceNode->create_publisher("rl_results", 1); + } + + RLInferenceModel::~RLInferenceModel() { + stopInference(); + } + + bool RLInferenceModel::loadModel(const char* artifactPath, + std::shared_ptr imgProcess, + std::string device) { + if (doInference_) { + RCLCPP_ERROR(inferenceNode->get_logger(), "Please stop inference prior to loading a model"); + return false; + } + if (!imgProcess) { + RCLCPP_ERROR(inferenceNode->get_logger(), "Invalid image processing algorithm"); + return false; + } + // Set the image processing algorithms + imgProcess_ = imgProcess; + + // Load the model + try { + + model_ = tflite::FlatBufferModel::BuildFromFile(artifactPath); + + tflite::ops::builtin::BuiltinOpResolver resolver; + tflite::InterpreterBuilder(*model_, resolver)(&interpreter_); + + interpreter_->AllocateTensors(); + + // Determine input and output dimensions + for (auto i : interpreter_->inputs()) + { + auto const *input_tensor = interpreter_->tensor(i); + input_tensors_.push_back(input_tensor); + + auto dims = std::vector{}; + std::copy( + input_tensor->dims->data, input_tensor->dims->data + input_tensor->dims->size, + std::back_inserter(dims)); + + inputNamesArr_.push_back(interpreter_->GetInputName(i)); + inputDimsArr_.push_back(dims); + inputSizesArr_.push_back(input_tensor->bytes); + + std::unordered_map params_ = {{"width", input_tensor->dims->data[2]}, + {"height", input_tensor->dims->data[1]}, + {"channels", input_tensor->dims->data[0]}}; + paramsArr_.push_back(params_); + + RCLCPP_INFO(inferenceNode->get_logger(), "Input name: %s", interpreter_->GetInputName(i)); + RCLCPP_INFO(inferenceNode->get_logger(), "Input dimensions: %i x %i x %i", input_tensor->dims->data[2], input_tensor->dims->data[1], input_tensor->dims->data[0]); + } + + for (auto o : interpreter_->outputs()) + { + auto const *output_tensor = interpreter_->tensor(o); + output_tensors_.push_back(output_tensor); + + auto dims = std::vector{}; + std::copy( + 
output_tensor->dims->data, output_tensor->dims->data + output_tensor->dims->size, + std::back_inserter(dims)); + + RCLCPP_INFO(inferenceNode->get_logger(), "Output name: %s", interpreter_->GetOutputName(o)); + + outputDimsArr_.push_back(dims); + outputSizes_.push_back(output_tensor->bytes); + } + + } + catch (const std::exception &ex) { + RCLCPP_ERROR(inferenceNode->get_logger(), "Model failed to load: %s", ex.what()); + return false; + } + return true; + } + + void RLInferenceModel::startInference() { + // Reset the image processing algorithm. + if (imgProcess_) { + imgProcess_->reset(); + } + doInference_ = true; + } + + void RLInferenceModel::stopInference() { + doInference_ = false; + } + + void RLInferenceModel::sensorCB(const deepracer_interfaces_pkg::msg::EvoSensorMsg::SharedPtr msg) { + if(!doInference_) { + return; + } + try { + for(size_t i = 0; i < inputNamesArr_.size(); ++i) { + float* inputLayer = interpreter_->typed_input_tensor(i); + + // Object that will hold the data sent to the inference engine post processed. + cv::Mat retData; + if (inputNamesArr_[i].find(STEREO) != std::string::npos) + { + loadStereoImg(inputLayer, retData, imgProcess_, msg->images, paramsArr_[i]); + } + else if (inputNamesArr_[i].find(FRONT) != std::string::npos + || inputNamesArr_[i].find(LEFT) != std::string::npos + || inputNamesArr_[i].find(OBS) != std::string::npos) { + load1DImg(inputLayer, retData, imgProcess_, msg->images.front(), paramsArr_[i]); + } + else if (inputNamesArr_[i].find(LIDAR) != std::string::npos){ + loadLidarData(inputLayer, msg->lidar_data); + } + else { + RCLCPP_ERROR(inferenceNode->get_logger(), "Invalid input head"); + return; + } + imgProcess_->reset(); + } + // Do inference + interpreter_->Invoke(); + + // Last dimension of output is number of classes + auto nClasses = outputDimsArr_[0].back(); + + auto * outputData = output_tensors_[0]->data.f; + for (auto i = 0; i < nClasses; ++i) { + std::cout << std::to_string(i) << ": " << outputData[i] << std::endl; + } + + auto inferMsg = deepracer_interfaces_pkg::msg::InferResultsArray(); + for (size_t i = 0; i < msg->images.size(); ++i) { + // Send the image data over with the results + inferMsg.images.push_back(msg->images[i]) ; + } + + for (size_t label = 0; label < nClasses; ++label) { + auto inferData = deepracer_interfaces_pkg::msg::InferResults(); + inferData.class_label = label; + inferData.class_prob = outputData[label]; + // Set bounding box data to -1 to indicate to subscribers that this model offers no + // localization information. + inferData.x_min = -1.0; + inferData.y_min = -1.0; + inferData.x_max = -1.0; + inferData.y_max = -1.0; + inferMsg.results.push_back(inferData); + } + // Send results to all subscribers. 
+ resultPub_->publish(inferMsg); + } + catch (const std::exception &ex) { + RCLCPP_ERROR(inferenceNode->get_logger(), "Inference failed %s", ex.what()); + } + } +} From 60e7b0c0b1f79be020f6843e252660813cf766e4 Mon Sep 17 00:00:00 2001 From: Lars Ludvigsen Date: Mon, 26 Feb 2024 19:31:26 +0000 Subject: [PATCH 04/12] Updated build spec --- inference_pkg/CMakeLists.txt | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/inference_pkg/CMakeLists.txt b/inference_pkg/CMakeLists.txt index 92163d5..9d31e6d 100644 --- a/inference_pkg/CMakeLists.txt +++ b/inference_pkg/CMakeLists.txt @@ -1,5 +1,6 @@ cmake_minimum_required(VERSION 3.5) project(inference_pkg) +include(FetchContent) # Default to C99 if(NOT CMAKE_C_STANDARD) @@ -11,11 +12,14 @@ if(NOT CMAKE_CXX_STANDARD) set(CMAKE_CXX_STANDARD 17) endif() -set(TENSORFLOW_SOURCE_DIR ${CMAKE_SOURCE_DIR}/../../../deps/tensorflow) +FetchContent_Declare(tensorflow-lite + GIT_REPOSITORY https://github.com/tensorflow/tensorflow.git +) +FetchContent_Populate(tensorflow-lite) add_subdirectory( - "${TENSORFLOW_SOURCE_DIR}/tensorflow/lite" - "${CMAKE_CURRENT_BINARY_DIR}/tensorflow-lite" + ${tensorflow-lite_SOURCE_DIR}/tensorflow/lite + ${tensorflow-lite_BINARY_DIR} EXCLUDE_FROM_ALL ) From 16bd89333397049b493318d1e12406a9225b5d42 Mon Sep 17 00:00:00 2001 From: Lars Ludvigsen Date: Wed, 28 Feb 2024 20:04:14 +0000 Subject: [PATCH 05/12] Remove Intel --- .../inference_pkg/intel_inference_eng.hpp | 68 ---- inference_pkg/src/intel_inference_eng.cpp | 310 ------------------ 2 files changed, 378 deletions(-) delete mode 100644 inference_pkg/include/inference_pkg/intel_inference_eng.hpp delete mode 100644 inference_pkg/src/intel_inference_eng.cpp diff --git a/inference_pkg/include/inference_pkg/intel_inference_eng.hpp b/inference_pkg/include/inference_pkg/intel_inference_eng.hpp deleted file mode 100644 index 4295950..0000000 --- a/inference_pkg/include/inference_pkg/intel_inference_eng.hpp +++ /dev/null @@ -1,68 +0,0 @@ -/////////////////////////////////////////////////////////////////////////////////// -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // -// // -// Licensed under the Apache License, Version 2.0 (the "License"). // -// You may not use this file except in compliance with the License. // -// You may obtain a copy of the License at // -// // -// http://www.apache.org/licenses/LICENSE-2.0 // -// // -// Unless required by applicable law or agreed to in writing, software // -// distributed under the License is distributed on an "AS IS" BASIS, // -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // -// See the License for the specific language governing permissions and // -// limitations under the License. // -/////////////////////////////////////////////////////////////////////////////////// - -#ifndef INTEL_INFERENCE_ENG_HPP -#define INTEL_INFERENCE_ENG_HPP - -#include "inference_pkg/inference_base.hpp" -#include "inference_engine.hpp" -#include "deepracer_interfaces_pkg/msg/evo_sensor_msg.hpp" -#include "deepracer_interfaces_pkg/msg/infer_results_array.hpp" -#include - -namespace IntelInferenceEngine { - class RLInferenceModel : public InferTask::InferenceBase - { - /// Concrete inference task class for running reinforcement learning models - /// on the GPU. - public: - /// @param node_name Name of the node to be created. - /// @param subName Name of the topic to subscribe to for sensor data. 
- RLInferenceModel(std::shared_ptr inferenceNodePtr, const std::string &sensorSubName); - virtual ~RLInferenceModel(); - virtual bool loadModel(const char* artifactPath, - std::shared_ptr imgProcess, - std::string device) override; - virtual void startInference() override; - virtual void stopInference() override; - /// Callback method to retrieve sensor data. - /// @param msg Message returned by the ROS messaging system. - void sensorCB(const deepracer_interfaces_pkg::msg::EvoSensorMsg::SharedPtr msg); - - private: - /// Inference node object - std::shared_ptr inferenceNode; - /// ROS subscriber object to the desired sensor topic. - rclcpp::Subscription::SharedPtr sensorSub_; - /// ROS publisher object to the desired topic. - rclcpp::Publisher::SharedPtr resultPub_; - /// Pointer to image processing algorithm. - std::shared_ptr imgProcess_; - /// Inference state variable. - std::atomic doInference_; - /// Neural network Inference engine core object. - InferenceEngine::Core core_; - /// Inference request object - InferenceEngine::InferRequest inferRequest_; - /// Vector of hash map that stores all relevant pre-processing parameters for each input head. - std::vector> paramsArr_; - /// Vector of names of the input heads - std::vector inputNamesArr_; - /// Name of the output layer - std::string outputName_; - }; -} -#endif \ No newline at end of file diff --git a/inference_pkg/src/intel_inference_eng.cpp b/inference_pkg/src/intel_inference_eng.cpp deleted file mode 100644 index 1abf108..0000000 --- a/inference_pkg/src/intel_inference_eng.cpp +++ /dev/null @@ -1,310 +0,0 @@ -/////////////////////////////////////////////////////////////////////////////////// -// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // -// // -// Licensed under the Apache License, Version 2.0 (the "License"). // -// You may not use this file except in compliance with the License. // -// You may obtain a copy of the License at // -// // -// http://www.apache.org/licenses/LICENSE-2.0 // -// // -// Unless required by applicable law or agreed to in writing, software // -// distributed under the License is distributed on an "AS IS" BASIS, // -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // -// See the License for the specific language governing permissions and // -// limitations under the License. // -/////////////////////////////////////////////////////////////////////////////////// - -#include "inference_pkg/intel_inference_eng.hpp" -/// Intel Open Vino specific headers -// #include "ie_plugin_dispatcher.hpp" -// #include "ie_plugin_ptr.hpp" -// #include "cpp/ie_cnn_net_reader.h" -// ROS2 message headers -#include "deepracer_interfaces_pkg/msg/infer_results.hpp" -#include "deepracer_interfaces_pkg/msg/infer_results_array.hpp" - -#include -#define RAD2DEG(x) ((x)*180./M_PI) - -const std::string LIDAR = "LIDAR"; -const std::string STEREO = "STEREO_CAMERAS"; -const std::string FRONT = "FRONT_FACING_CAMERA"; -const std::string OBS = "observation"; -const std::string LEFT = "LEFT_CAMERA"; - - -namespace { - class InferenceExcept : public std::exception - { - /// Simple exception class that is used to send a message to the catch clause. - public: - /// @param msg Message to be logged - InferenceExcept(std::string msg) - : msg_(msg) - { - } - virtual const char* what() const throw() override { - return msg_.c_str(); - } - private: - /// Store message in class so that the what method can dump it when invoked. 
- const std::string msg_; - }; - /// Helper method that loads the multi head model into the desired plugin. - /// @returns Inference request object that will be used to perform inference - /// @param artifactPath Path to the artifact (xml) file - /// @param device String value of the device being used (CPU/GPU) - /// @param core Reference to a InferenceEngine core object. - /// @param inputName Reference to the vector of input layer names - /// @param outputName Reference to the output layers name, the method will populate this string - /// @param inputPrec The precision to use for the input layer - /// @param outputPrec The precision to use for the output layer - InferenceEngine::InferRequest setMultiHeadModel(std::string artifactPath, const std::string &device, - InferenceEngine::Core core, std::vector &inputNamesArr, - std::string &outputName, const InferenceEngine::Precision &inputPrec, - const InferenceEngine::Precision &outputPrec, - std::shared_ptr inferenceNode) { - - RCLCPP_INFO(inferenceNode->get_logger(), "******* In setMultiHeadModel *******"); - // Validate the artifact path. - auto strIdx = artifactPath.rfind('.'); - if (strIdx == std::string::npos) { - throw InferenceExcept("Artifact missing file extension"); - } - if (artifactPath.substr(strIdx+1) != "xml") { - throw InferenceExcept("No xml extension found"); - } - - auto network = core.ReadNetwork(artifactPath); - // Loop through the inputNamesArr and set the precision - for (const auto& pair : network.getInputsInfo()) { - if(pair.first.rfind(OBS) != std::string::npos - || pair.first.rfind(LIDAR) != std::string::npos - || pair.first.rfind(FRONT) != std::string::npos - || pair.first.rfind(STEREO) != std::string::npos - || pair.first.rfind(LEFT) != std::string::npos) { - inputNamesArr.push_back(pair.first); - pair.second->setPrecision(inputPrec); - } - } - auto outputInfo = network.getOutputsInfo().begin()->second; - outputName = network.getOutputsInfo().begin()->first; - outputInfo->setPrecision(outputPrec); - - auto executableNetwork = core.LoadNetwork(network, device); - return executableNetwork.CreateInferRequest(); - } - - /// Helper method that loads grey images into the inference engine input - /// @param inputPtr Pointer to the input data. - /// @param imgProcessPtr Pointer to the image processing algorithm. - /// @param imgData ROS message containing the image data. - /// @param params Hash map of relevant parameters for image processing. - template void load1DImg(V *inputPtr, - cv::Mat &retImg, - std::shared_ptr imgProcessPtr, - const sensor_msgs::msg::CompressedImage &imgData, - const std::unordered_map ¶ms) { - imgProcessPtr->processImage(imgData, retImg, params); - if (retImg.empty()) { - throw InferenceExcept("No image after pre-process"); - } - int height = retImg.rows; - int width = retImg.cols; - - for (int h = 0; h < height; h++) { - for (int w = 0; w < width; w++) { - inputPtr[h * width + w] = retImg.at(h, w); - } - } - } - - /// Helper method that loads multi channel images into the inference engine input - /// @param inputPtr Pointer to the input data. - /// @param imgProcessPtr Pointer to the image processing algorithm. - /// @param imgData ROS message containing the image data. - /// @param params Hash map of relevant parameters for image processing. 
- template void loadStackImg(V *inputPtr, - cv::Mat &retImg, - std::shared_ptr imgProcessPtr, - const sensor_msgs::msg::CompressedImage &imgData, - const std::unordered_map ¶ms) { - imgProcessPtr->processImage(imgData, retImg, params); - if (retImg.empty()) { - throw InferenceExcept("No image after-pre process"); - } - const int channelSize = retImg.rows * retImg.cols; - - for (size_t pixelNum = 0; pixelNum < channelSize; ++pixelNum) { - for (size_t ch = 0; ch < retImg.channels(); ++ch) { - inputPtr[(ch*channelSize) + pixelNum] = retImg.at(pixelNum)[ch]; - } - } - } - - /// Helper method that loads multi channel images into the inference engine input - /// @param inputPtr Pointer to the input data. - /// @param imgProcessPtr Pointer to the image processing algorithm. - /// @param imgData ROS message containing the image data. - /// @param params Hash map of relevant parameters for image processing. - template void loadStereoImg(V *inputPtr, - cv::Mat &retImg, - std::shared_ptr imgProcessPtr, - const std::vector &imgDataArr, - const std::unordered_map ¶ms) { - - imgProcessPtr->processImageVec(imgDataArr, retImg, params); - if (retImg.empty()) { - throw InferenceExcept("No image after-pre process"); - } - - const int width = retImg.cols; - const int height = retImg.rows; - const int channel = retImg.channels(); - - for (int c = 0; c < channel; c++) { - for (int h = 0; h < height; h++) { - for (int w = 0; w < width; w++) { - inputPtr[c * width * height + h * width + w] = retImg.at(h, w)[c]; - } - } - } - } - - /// Helper method that loads 1D data into the inference engine input - /// @param inputPtr Pointer to the input data. - /// @param lidarData ROS message containing the lidar data. - void loadLidarData(float *inputPtr, - const std::vector &lidar_data) { - size_t pixelNum = 0; - for(const auto& lidar_value : lidar_data) { - inputPtr[pixelNum] = lidar_value; - ++pixelNum; - } - } -} - -namespace IntelInferenceEngine { - RLInferenceModel::RLInferenceModel(std::shared_ptr inferenceNodePtr, const std::string &sensorSubName) - : doInference_(false) - { - inferenceNode = inferenceNodePtr; - RCLCPP_INFO(inferenceNode->get_logger(), "Initializing RL Model"); - RCLCPP_INFO(inferenceNode->get_logger(), "%s", sensorSubName.c_str()); - // Subscribe to the sensor topic and set the call back - sensorSub_ = inferenceNode->create_subscription(sensorSubName, 10, std::bind(&IntelInferenceEngine::RLInferenceModel::sensorCB, this, std::placeholders::_1)); - resultPub_ = inferenceNode->create_publisher("rl_results", 1); - } - - RLInferenceModel::~RLInferenceModel() { - stopInference(); - } - - bool RLInferenceModel::loadModel(const char* artifactPath, - std::shared_ptr imgProcess, - std::string device) { - if (doInference_) { - RCLCPP_ERROR(inferenceNode->get_logger(), "Please stop inference prior to loading a model"); - return false; - } - if (!imgProcess) { - RCLCPP_ERROR(inferenceNode->get_logger(), "Invalid image processing algorithm"); - return false; - } - // Set the image processing algorithms - imgProcess_ = imgProcess; - // Load the model - try { - inferRequest_ = setMultiHeadModel(artifactPath, device, core_, inputNamesArr_, - outputName_, InferenceEngine::Precision::FP32, - InferenceEngine::Precision::FP32, inferenceNode); - for(size_t i = 0; i != inputNamesArr_.size(); ++i) { - auto input = inferRequest_.GetBlob(inputNamesArr_[i]); - std::unordered_map params_ = {{"width", input->getTensorDesc().getDims()[3]}, - {"height", input->getTensorDesc().getDims()[2]}, - {"channels", 
input->getTensorDesc().getDims()[1]}}; - paramsArr_.push_back(params_); - } - } - catch (const std::exception &ex) { - RCLCPP_ERROR(inferenceNode->get_logger(), "Model failed to load: %s", ex.what()); - return false; - } - return true; - } - - void RLInferenceModel::startInference() { - // Reset the image processing algorithm. - if (imgProcess_) { - imgProcess_->reset(); - } - doInference_ = true; - } - - void RLInferenceModel::stopInference() { - doInference_ = false; - } - - void RLInferenceModel::sensorCB(const deepracer_interfaces_pkg::msg::EvoSensorMsg::SharedPtr msg) { - if(!doInference_) { - return; - } - try { - for(size_t i = 0; i < inputNamesArr_.size(); ++i) { - auto inputPtr = inferRequest_.GetBlob(inputNamesArr_[i])->buffer().as::value_type *>(); - - // Object that will hold the data sent to the inference engine post processed. - cv::Mat retData; - if (inputNamesArr_[i].find(STEREO) != std::string::npos) - { - loadStereoImg(inputPtr, retData, imgProcess_, msg->images, paramsArr_[i]); - } - else if (inputNamesArr_[i].find(FRONT) != std::string::npos - || inputNamesArr_[i].find(LEFT) != std::string::npos - || inputNamesArr_[i].find(OBS) != std::string::npos) { - load1DImg(inputPtr, retData, imgProcess_, msg->images.front(), paramsArr_[i]); - } - else if (inputNamesArr_[i].find(LIDAR) != std::string::npos){ - loadLidarData(inputPtr, msg->lidar_data); - } - else { - RCLCPP_ERROR(inferenceNode->get_logger(), "Invalid input head"); - return; - } - imgProcess_->reset(); - } - // Do inference - inferRequest_.Infer(); - - auto output = inferRequest_.GetBlob(outputName_); - // Package the results and publish to all subscribers. - - auto outputDims = output->getTensorDesc().getDims(); - auto outputData = output->buffer().as::value_type*>(); - - auto inferMsg = deepracer_interfaces_pkg::msg::InferResultsArray(); - for (size_t i = 0; i < msg->images.size(); ++i) { - // Send the image data over with the results - inferMsg.images.push_back(msg->images[i]) ; - } - - for (size_t label = 0; label < outputDims[1]; ++label) { - auto inferData = deepracer_interfaces_pkg::msg::InferResults(); - inferData.class_label = label; - inferData.class_prob = outputData[label]; - // Set bounding box data to -1 to indicate to subscribers that this model offers no - // localization information. - inferData.x_min = -1.0; - inferData.y_min = -1.0; - inferData.x_max = -1.0; - inferData.y_max = -1.0; - inferMsg.results.push_back(inferData); - } - // Send results to all subscribers. 
- resultPub_->publish(inferMsg); - } - catch (const std::exception &ex) { - RCLCPP_ERROR(inferenceNode->get_logger(), "Inference failed %s", ex.what()); - } - } -} From b27ee0280f95bdfe64102e0400ba0168f494679c Mon Sep 17 00:00:00 2001 From: Lars Ludvigsen Date: Wed, 28 Feb 2024 20:04:31 +0000 Subject: [PATCH 06/12] Simplify --- .../inference_pkg/tflite_inference_eng.hpp | 4 ---- inference_pkg/src/tflite_inference_eng.cpp | 19 +++++++++++++++---- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/inference_pkg/include/inference_pkg/tflite_inference_eng.hpp b/inference_pkg/include/inference_pkg/tflite_inference_eng.hpp index cc2d82e..1aa5106 100644 --- a/inference_pkg/include/inference_pkg/tflite_inference_eng.hpp +++ b/inference_pkg/include/inference_pkg/tflite_inference_eng.hpp @@ -65,11 +65,7 @@ namespace TFLiteInferenceEngine { std::vector inputNamesArr_; /// Name of the output layer std::string outputName_; - std::vector> inputDimsArr_; std::vector> outputDimsArr_; - std::vector inputSizesArr_; - std::vector outputSizes_; - std::vector input_tensors_; std::vector output_tensors_; }; diff --git a/inference_pkg/src/tflite_inference_eng.cpp b/inference_pkg/src/tflite_inference_eng.cpp index ba336bb..a268e0b 100644 --- a/inference_pkg/src/tflite_inference_eng.cpp +++ b/inference_pkg/src/tflite_inference_eng.cpp @@ -164,9 +164,24 @@ namespace TFLiteInferenceEngine { RCLCPP_ERROR(inferenceNode->get_logger(), "Invalid image processing algorithm"); return false; } + + // Validate the artifact path. + auto strIdx = ((std::string) artifactPath).rfind('.'); + if (strIdx == std::string::npos) { + throw InferenceExcept("Artifact missing file extension"); + } + if (((std::string) artifactPath).substr(strIdx+1) != "tflite") { + throw InferenceExcept("No tflite extension found"); + } + // Set the image processing algorithms imgProcess_ = imgProcess; + // Clean up vectors + inputNamesArr_.clear(); + outputDimsArr_.clear(); + output_tensors_.clear(); + // Load the model try { @@ -181,7 +196,6 @@ namespace TFLiteInferenceEngine { for (auto i : interpreter_->inputs()) { auto const *input_tensor = interpreter_->tensor(i); - input_tensors_.push_back(input_tensor); auto dims = std::vector{}; std::copy( @@ -189,8 +203,6 @@ namespace TFLiteInferenceEngine { std::back_inserter(dims)); inputNamesArr_.push_back(interpreter_->GetInputName(i)); - inputDimsArr_.push_back(dims); - inputSizesArr_.push_back(input_tensor->bytes); std::unordered_map params_ = {{"width", input_tensor->dims->data[2]}, {"height", input_tensor->dims->data[1]}, @@ -214,7 +226,6 @@ namespace TFLiteInferenceEngine { RCLCPP_INFO(inferenceNode->get_logger(), "Output name: %s", interpreter_->GetOutputName(o)); outputDimsArr_.push_back(dims); - outputSizes_.push_back(output_tensor->bytes); } } From 6b6e26fcfdad90cbd605ac6d9b7fa7500d1f3d66 Mon Sep 17 00:00:00 2001 From: Lars Ludvigsen Date: Sun, 3 Mar 2024 17:18:22 +0000 Subject: [PATCH 07/12] Re-add Intel --- .../inference_pkg/intel_inference_eng.hpp | 68 ++++ inference_pkg/src/intel_inference_eng.cpp | 310 ++++++++++++++++++ 2 files changed, 378 insertions(+) create mode 100644 inference_pkg/include/inference_pkg/intel_inference_eng.hpp create mode 100644 inference_pkg/src/intel_inference_eng.cpp diff --git a/inference_pkg/include/inference_pkg/intel_inference_eng.hpp b/inference_pkg/include/inference_pkg/intel_inference_eng.hpp new file mode 100644 index 0000000..4295950 --- /dev/null +++ b/inference_pkg/include/inference_pkg/intel_inference_eng.hpp @@ -0,0 +1,68 @@ 
+/////////////////////////////////////////////////////////////////////////////////// +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // +// // +// Licensed under the Apache License, Version 2.0 (the "License"). // +// You may not use this file except in compliance with the License. // +// You may obtain a copy of the License at // +// // +// http://www.apache.org/licenses/LICENSE-2.0 // +// // +// Unless required by applicable law or agreed to in writing, software // +// distributed under the License is distributed on an "AS IS" BASIS, // +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // +// See the License for the specific language governing permissions and // +// limitations under the License. // +/////////////////////////////////////////////////////////////////////////////////// + +#ifndef INTEL_INFERENCE_ENG_HPP +#define INTEL_INFERENCE_ENG_HPP + +#include "inference_pkg/inference_base.hpp" +#include "inference_engine.hpp" +#include "deepracer_interfaces_pkg/msg/evo_sensor_msg.hpp" +#include "deepracer_interfaces_pkg/msg/infer_results_array.hpp" +#include + +namespace IntelInferenceEngine { + class RLInferenceModel : public InferTask::InferenceBase + { + /// Concrete inference task class for running reinforcement learning models + /// on the GPU. + public: + /// @param node_name Name of the node to be created. + /// @param subName Name of the topic to subscribe to for sensor data. + RLInferenceModel(std::shared_ptr inferenceNodePtr, const std::string &sensorSubName); + virtual ~RLInferenceModel(); + virtual bool loadModel(const char* artifactPath, + std::shared_ptr imgProcess, + std::string device) override; + virtual void startInference() override; + virtual void stopInference() override; + /// Callback method to retrieve sensor data. + /// @param msg Message returned by the ROS messaging system. + void sensorCB(const deepracer_interfaces_pkg::msg::EvoSensorMsg::SharedPtr msg); + + private: + /// Inference node object + std::shared_ptr inferenceNode; + /// ROS subscriber object to the desired sensor topic. + rclcpp::Subscription::SharedPtr sensorSub_; + /// ROS publisher object to the desired topic. + rclcpp::Publisher::SharedPtr resultPub_; + /// Pointer to image processing algorithm. + std::shared_ptr imgProcess_; + /// Inference state variable. + std::atomic doInference_; + /// Neural network Inference engine core object. + InferenceEngine::Core core_; + /// Inference request object + InferenceEngine::InferRequest inferRequest_; + /// Vector of hash map that stores all relevant pre-processing parameters for each input head. + std::vector> paramsArr_; + /// Vector of names of the input heads + std::vector inputNamesArr_; + /// Name of the output layer + std::string outputName_; + }; +} +#endif \ No newline at end of file diff --git a/inference_pkg/src/intel_inference_eng.cpp b/inference_pkg/src/intel_inference_eng.cpp new file mode 100644 index 0000000..1abf108 --- /dev/null +++ b/inference_pkg/src/intel_inference_eng.cpp @@ -0,0 +1,310 @@ +/////////////////////////////////////////////////////////////////////////////////// +// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // +// // +// Licensed under the Apache License, Version 2.0 (the "License"). // +// You may not use this file except in compliance with the License. 
// +// You may obtain a copy of the License at // +// // +// http://www.apache.org/licenses/LICENSE-2.0 // +// // +// Unless required by applicable law or agreed to in writing, software // +// distributed under the License is distributed on an "AS IS" BASIS, // +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // +// See the License for the specific language governing permissions and // +// limitations under the License. // +/////////////////////////////////////////////////////////////////////////////////// + +#include "inference_pkg/intel_inference_eng.hpp" +/// Intel Open Vino specific headers +// #include "ie_plugin_dispatcher.hpp" +// #include "ie_plugin_ptr.hpp" +// #include "cpp/ie_cnn_net_reader.h" +// ROS2 message headers +#include "deepracer_interfaces_pkg/msg/infer_results.hpp" +#include "deepracer_interfaces_pkg/msg/infer_results_array.hpp" + +#include +#define RAD2DEG(x) ((x)*180./M_PI) + +const std::string LIDAR = "LIDAR"; +const std::string STEREO = "STEREO_CAMERAS"; +const std::string FRONT = "FRONT_FACING_CAMERA"; +const std::string OBS = "observation"; +const std::string LEFT = "LEFT_CAMERA"; + + +namespace { + class InferenceExcept : public std::exception + { + /// Simple exception class that is used to send a message to the catch clause. + public: + /// @param msg Message to be logged + InferenceExcept(std::string msg) + : msg_(msg) + { + } + virtual const char* what() const throw() override { + return msg_.c_str(); + } + private: + /// Store message in class so that the what method can dump it when invoked. + const std::string msg_; + }; + /// Helper method that loads the multi head model into the desired plugin. + /// @returns Inference request object that will be used to perform inference + /// @param artifactPath Path to the artifact (xml) file + /// @param device String value of the device being used (CPU/GPU) + /// @param core Reference to a InferenceEngine core object. + /// @param inputName Reference to the vector of input layer names + /// @param outputName Reference to the output layers name, the method will populate this string + /// @param inputPrec The precision to use for the input layer + /// @param outputPrec The precision to use for the output layer + InferenceEngine::InferRequest setMultiHeadModel(std::string artifactPath, const std::string &device, + InferenceEngine::Core core, std::vector &inputNamesArr, + std::string &outputName, const InferenceEngine::Precision &inputPrec, + const InferenceEngine::Precision &outputPrec, + std::shared_ptr inferenceNode) { + + RCLCPP_INFO(inferenceNode->get_logger(), "******* In setMultiHeadModel *******"); + // Validate the artifact path. 
+ auto strIdx = artifactPath.rfind('.'); + if (strIdx == std::string::npos) { + throw InferenceExcept("Artifact missing file extension"); + } + if (artifactPath.substr(strIdx+1) != "xml") { + throw InferenceExcept("No xml extension found"); + } + + auto network = core.ReadNetwork(artifactPath); + // Loop through the inputNamesArr and set the precision + for (const auto& pair : network.getInputsInfo()) { + if(pair.first.rfind(OBS) != std::string::npos + || pair.first.rfind(LIDAR) != std::string::npos + || pair.first.rfind(FRONT) != std::string::npos + || pair.first.rfind(STEREO) != std::string::npos + || pair.first.rfind(LEFT) != std::string::npos) { + inputNamesArr.push_back(pair.first); + pair.second->setPrecision(inputPrec); + } + } + auto outputInfo = network.getOutputsInfo().begin()->second; + outputName = network.getOutputsInfo().begin()->first; + outputInfo->setPrecision(outputPrec); + + auto executableNetwork = core.LoadNetwork(network, device); + return executableNetwork.CreateInferRequest(); + } + + /// Helper method that loads grey images into the inference engine input + /// @param inputPtr Pointer to the input data. + /// @param imgProcessPtr Pointer to the image processing algorithm. + /// @param imgData ROS message containing the image data. + /// @param params Hash map of relevant parameters for image processing. + template void load1DImg(V *inputPtr, + cv::Mat &retImg, + std::shared_ptr imgProcessPtr, + const sensor_msgs::msg::CompressedImage &imgData, + const std::unordered_map ¶ms) { + imgProcessPtr->processImage(imgData, retImg, params); + if (retImg.empty()) { + throw InferenceExcept("No image after pre-process"); + } + int height = retImg.rows; + int width = retImg.cols; + + for (int h = 0; h < height; h++) { + for (int w = 0; w < width; w++) { + inputPtr[h * width + w] = retImg.at(h, w); + } + } + } + + /// Helper method that loads multi channel images into the inference engine input + /// @param inputPtr Pointer to the input data. + /// @param imgProcessPtr Pointer to the image processing algorithm. + /// @param imgData ROS message containing the image data. + /// @param params Hash map of relevant parameters for image processing. + template void loadStackImg(V *inputPtr, + cv::Mat &retImg, + std::shared_ptr imgProcessPtr, + const sensor_msgs::msg::CompressedImage &imgData, + const std::unordered_map ¶ms) { + imgProcessPtr->processImage(imgData, retImg, params); + if (retImg.empty()) { + throw InferenceExcept("No image after-pre process"); + } + const int channelSize = retImg.rows * retImg.cols; + + for (size_t pixelNum = 0; pixelNum < channelSize; ++pixelNum) { + for (size_t ch = 0; ch < retImg.channels(); ++ch) { + inputPtr[(ch*channelSize) + pixelNum] = retImg.at(pixelNum)[ch]; + } + } + } + + /// Helper method that loads multi channel images into the inference engine input + /// @param inputPtr Pointer to the input data. + /// @param imgProcessPtr Pointer to the image processing algorithm. + /// @param imgData ROS message containing the image data. + /// @param params Hash map of relevant parameters for image processing. 
+ template void loadStereoImg(V *inputPtr, + cv::Mat &retImg, + std::shared_ptr imgProcessPtr, + const std::vector &imgDataArr, + const std::unordered_map ¶ms) { + + imgProcessPtr->processImageVec(imgDataArr, retImg, params); + if (retImg.empty()) { + throw InferenceExcept("No image after-pre process"); + } + + const int width = retImg.cols; + const int height = retImg.rows; + const int channel = retImg.channels(); + + for (int c = 0; c < channel; c++) { + for (int h = 0; h < height; h++) { + for (int w = 0; w < width; w++) { + inputPtr[c * width * height + h * width + w] = retImg.at(h, w)[c]; + } + } + } + } + + /// Helper method that loads 1D data into the inference engine input + /// @param inputPtr Pointer to the input data. + /// @param lidarData ROS message containing the lidar data. + void loadLidarData(float *inputPtr, + const std::vector &lidar_data) { + size_t pixelNum = 0; + for(const auto& lidar_value : lidar_data) { + inputPtr[pixelNum] = lidar_value; + ++pixelNum; + } + } +} + +namespace IntelInferenceEngine { + RLInferenceModel::RLInferenceModel(std::shared_ptr inferenceNodePtr, const std::string &sensorSubName) + : doInference_(false) + { + inferenceNode = inferenceNodePtr; + RCLCPP_INFO(inferenceNode->get_logger(), "Initializing RL Model"); + RCLCPP_INFO(inferenceNode->get_logger(), "%s", sensorSubName.c_str()); + // Subscribe to the sensor topic and set the call back + sensorSub_ = inferenceNode->create_subscription(sensorSubName, 10, std::bind(&IntelInferenceEngine::RLInferenceModel::sensorCB, this, std::placeholders::_1)); + resultPub_ = inferenceNode->create_publisher("rl_results", 1); + } + + RLInferenceModel::~RLInferenceModel() { + stopInference(); + } + + bool RLInferenceModel::loadModel(const char* artifactPath, + std::shared_ptr imgProcess, + std::string device) { + if (doInference_) { + RCLCPP_ERROR(inferenceNode->get_logger(), "Please stop inference prior to loading a model"); + return false; + } + if (!imgProcess) { + RCLCPP_ERROR(inferenceNode->get_logger(), "Invalid image processing algorithm"); + return false; + } + // Set the image processing algorithms + imgProcess_ = imgProcess; + // Load the model + try { + inferRequest_ = setMultiHeadModel(artifactPath, device, core_, inputNamesArr_, + outputName_, InferenceEngine::Precision::FP32, + InferenceEngine::Precision::FP32, inferenceNode); + for(size_t i = 0; i != inputNamesArr_.size(); ++i) { + auto input = inferRequest_.GetBlob(inputNamesArr_[i]); + std::unordered_map params_ = {{"width", input->getTensorDesc().getDims()[3]}, + {"height", input->getTensorDesc().getDims()[2]}, + {"channels", input->getTensorDesc().getDims()[1]}}; + paramsArr_.push_back(params_); + } + } + catch (const std::exception &ex) { + RCLCPP_ERROR(inferenceNode->get_logger(), "Model failed to load: %s", ex.what()); + return false; + } + return true; + } + + void RLInferenceModel::startInference() { + // Reset the image processing algorithm. + if (imgProcess_) { + imgProcess_->reset(); + } + doInference_ = true; + } + + void RLInferenceModel::stopInference() { + doInference_ = false; + } + + void RLInferenceModel::sensorCB(const deepracer_interfaces_pkg::msg::EvoSensorMsg::SharedPtr msg) { + if(!doInference_) { + return; + } + try { + for(size_t i = 0; i < inputNamesArr_.size(); ++i) { + auto inputPtr = inferRequest_.GetBlob(inputNamesArr_[i])->buffer().as::value_type *>(); + + // Object that will hold the data sent to the inference engine post processed. 
+ cv::Mat retData; + if (inputNamesArr_[i].find(STEREO) != std::string::npos) + { + loadStereoImg(inputPtr, retData, imgProcess_, msg->images, paramsArr_[i]); + } + else if (inputNamesArr_[i].find(FRONT) != std::string::npos + || inputNamesArr_[i].find(LEFT) != std::string::npos + || inputNamesArr_[i].find(OBS) != std::string::npos) { + load1DImg(inputPtr, retData, imgProcess_, msg->images.front(), paramsArr_[i]); + } + else if (inputNamesArr_[i].find(LIDAR) != std::string::npos){ + loadLidarData(inputPtr, msg->lidar_data); + } + else { + RCLCPP_ERROR(inferenceNode->get_logger(), "Invalid input head"); + return; + } + imgProcess_->reset(); + } + // Do inference + inferRequest_.Infer(); + + auto output = inferRequest_.GetBlob(outputName_); + // Package the results and publish to all subscribers. + + auto outputDims = output->getTensorDesc().getDims(); + auto outputData = output->buffer().as::value_type*>(); + + auto inferMsg = deepracer_interfaces_pkg::msg::InferResultsArray(); + for (size_t i = 0; i < msg->images.size(); ++i) { + // Send the image data over with the results + inferMsg.images.push_back(msg->images[i]) ; + } + + for (size_t label = 0; label < outputDims[1]; ++label) { + auto inferData = deepracer_interfaces_pkg::msg::InferResults(); + inferData.class_label = label; + inferData.class_prob = outputData[label]; + // Set bounding box data to -1 to indicate to subscribers that this model offers no + // localization information. + inferData.x_min = -1.0; + inferData.y_min = -1.0; + inferData.x_max = -1.0; + inferData.y_max = -1.0; + inferMsg.results.push_back(inferData); + } + // Send results to all subscribers. + resultPub_->publish(inferMsg); + } + catch (const std::exception &ex) { + RCLCPP_ERROR(inferenceNode->get_logger(), "Inference failed %s", ex.what()); + } + } +} From 7ac1e25e56c1aa222823774e45761fe5bca06268 Mon Sep 17 00:00:00 2001 From: Lars Ludvigsen Date: Sun, 3 Mar 2024 17:28:18 +0000 Subject: [PATCH 08/12] Enable both OpenVINO and TFLITE --- inference_pkg/CMakeLists.txt | 11 +++++++++-- inference_pkg/src/inference_node.cpp | 19 ++++++++++++++++--- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/inference_pkg/CMakeLists.txt b/inference_pkg/CMakeLists.txt index 9d31e6d..5b63935 100644 --- a/inference_pkg/CMakeLists.txt +++ b/inference_pkg/CMakeLists.txt @@ -31,6 +31,8 @@ find_package(image_transport REQUIRED) find_package(cv_bridge REQUIRED) find_package(sensor_msgs REQUIRED) find_package(std_msgs REQUIRED) +find_package(ngraph REQUIRED) +find_package(InferenceEngine REQUIRED) find_package(OpenCV 4.2 QUIET COMPONENTS opencv_core @@ -51,21 +53,26 @@ endif() add_executable(inference_node src/inference_node.cpp src/tflite_inference_eng.cpp + src/intel_inference_eng.cpp src/image_process.cpp ) target_include_directories(inference_node PRIVATE include ${OpenCV_INCLUDE_DIRS} + ${InferenceEngine_INCLUDE_DIRS} ${CMAKE_CURRENT_BINARY_DIR}/flatbuffers/include $ ) target_link_libraries(inference_node -lm -ldl ${OpenCV_LIBRARIES} - tensorflow-lite) + tensorflow-lite + ${InferenceEngine_LIBRARIES} + ${NGRAPH_LIBRARIES} +) -ament_target_dependencies(inference_node rclcpp deepracer_interfaces_pkg sensor_msgs std_msgs cv_bridge image_transport OpenCV) +ament_target_dependencies(inference_node rclcpp deepracer_interfaces_pkg sensor_msgs std_msgs cv_bridge image_transport OpenCV InferenceEngine ngraph) install(TARGETS inference_node diff --git a/inference_pkg/src/inference_node.cpp b/inference_pkg/src/inference_node.cpp index 37ff3e8..a5c8a5f 100644 --- 
a/inference_pkg/src/inference_node.cpp
+++ b/inference_pkg/src/inference_node.cpp
@@ -14,6 +14,7 @@
 // limitations under the License. //
 ///////////////////////////////////////////////////////////////////////////////////
 
+#include "inference_pkg/intel_inference_eng.hpp"
 #include "inference_pkg/tflite_inference_eng.hpp"
 #include "deepracer_interfaces_pkg/srv/inference_state_srv.hpp"
 #include "deepracer_interfaces_pkg/srv/load_model_srv.hpp"
@@ -45,7 +46,8 @@ namespace InferTask {
     public:
         InferenceNodeMgr(const std::string & nodeName)
         : Node(nodeName),
-          deviceName_("CPU")
+          deviceName_("CPU"),
+          inferenceEngine_("TFLITE")
         {
             RCLCPP_INFO(this->get_logger(), "%s started", nodeName.c_str());
@@ -53,6 +55,10 @@
             // Device name; OpenVINO supports CPU, GPU and MYRIAD
             deviceName_ = this->get_parameter("device").as_string();
 
+            this->declare_parameter("inference_engine", inferenceEngine_);
+            // Inference Engine name; TFLITE or OPENVINO
+            inferenceEngine_ = this->get_parameter("inference_engine").as_string();
+
             loadModelServiceCbGrp_ = this->create_callback_group(rclcpp::callback_group::CallbackGroupType::MutuallyExclusive);
             loadModelService_ = this->create_service("load_model",
                                                      std::bind(&InferTask::InferenceNodeMgr::LoadModelHdl,
@@ -126,7 +132,12 @@
             if (itInferTask != taskList_.end() && itPreProcess != preProcessList_.end()) {
                 switch(req->task_type) {
                 case rlTask:
-                    itInferTask->second.reset(new TFLiteInferenceEngine::RLInferenceModel(this->shared_from_this(), "/sensor_fusion_pkg/sensor_msg"));
+                    if (inferenceEngine_.compare("TFLITE") == 0) {
+                        itInferTask->second.reset(new TFLiteInferenceEngine::RLInferenceModel(this->shared_from_this(), "/sensor_fusion_pkg/sensor_msg"));
+                    } else {
+                        itInferTask->second.reset(new IntelInferenceEngine::RLInferenceModel(this->shared_from_this(), "/sensor_fusion_pkg/sensor_msg"));
+                    }
+
                     break;
                 case objDetectTask:
                     //! TODO add object detection when class is implemented.
@@ -158,7 +169,9 @@
         /// Reference to the node handler.
 
         /// Compute device type.
-        std::string deviceName_;
+        std::string deviceName_;
+        /// Inference Engine parameter.
+        std::string inferenceEngine_;
     };
 }

From 97cf8829a6d9aa787091778ac0854e8f2a2e8fb8 Mon Sep 17 00:00:00 2001
From: Lars Ludvigsen
Date: Sun, 3 Mar 2024 18:49:32 +0000
Subject: [PATCH 09/12] Tune CMake

---
 inference_pkg/CMakeLists.txt | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/inference_pkg/CMakeLists.txt b/inference_pkg/CMakeLists.txt
index 5b63935..2991e4d 100644
--- a/inference_pkg/CMakeLists.txt
+++ b/inference_pkg/CMakeLists.txt
@@ -2,6 +2,8 @@ cmake_minimum_required(VERSION 3.5)
 project(inference_pkg)
 include(FetchContent)
 
+set(ABSL_PROPAGATE_CXX_STD ON)
+
 # Default to C99
 if(NOT CMAKE_C_STANDARD)
   set(CMAKE_C_STANDARD 99)
@@ -12,6 +14,10 @@
   set(CMAKE_CXX_STANDARD 17)
 endif()
 
+if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
+  add_compile_options(-Wno-deprecated-declarations -Wno-ignored-attributes)
+endif()
+
 FetchContent_Declare(tensorflow-lite
   GIT_REPOSITORY https://github.com/tensorflow/tensorflow.git
 )

From 3ce3e1d6b7078df8b576fe8e749cfd5e4a249b83 Mon Sep 17 00:00:00 2001
From: Lars Ludvigsen
Date: Sun, 17 Mar 2024 13:26:59 +0000
Subject: [PATCH 10/12] Publish loading model

---
 inference_pkg/src/inference_node.cpp | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/inference_pkg/src/inference_node.cpp b/inference_pkg/src/inference_node.cpp
index a5c8a5f..d59636d 100644
--- a/inference_pkg/src/inference_node.cpp
+++ b/inference_pkg/src/inference_node.cpp
@@ -16,6 +16,7 @@
 
 #include "inference_pkg/intel_inference_eng.hpp"
 #include "inference_pkg/tflite_inference_eng.hpp"
+#include "std_msgs/msg/string.hpp"
 #include "deepracer_interfaces_pkg/srv/inference_state_srv.hpp"
 #include "deepracer_interfaces_pkg/srv/load_model_srv.hpp"
@@ -44,6 +45,8 @@ namespace InferTask {
         /// @param nodeName Reference to the string containing name of the node.
         /// @param device Reference to the compute device (CPU, GPU, MYRIAD)
     public:
+        const char* MODEL_ARTIFACT_TOPIC = "model_artifact";
+
         InferenceNodeMgr(const std::string & nodeName)
         : Node(nodeName),
           deviceName_("CPU"),
@@ -79,6 +82,9 @@
                                                                ::rmw_qos_profile_default,
                                                                setInferenceStateServiceCbGrp_);
 
+            // Create a publisher to announce the path of the loaded model artifact.
+            modelArtifactPub_ = this->create_publisher(MODEL_ARTIFACT_TOPIC, 1);
+
             // Add all available task and algorithms to these hash maps.
             taskList_ = { {rlTask, nullptr} };
             preProcessList_ = { {rgb, std::make_shared()},
@@ -147,7 +153,14 @@
                     RCLCPP_ERROR(this->get_logger(), "Unknown inference task");
                     return;
                 }
+
                 itInferTask->second->loadModel(req->artifact_path.c_str(), itPreProcess->second, deviceName_);
+
+                // Send a message to say we have loaded a model
+                std_msgs::msg::String modelArtifactMsg;
+                modelArtifactMsg.data = req->artifact_path;
+                modelArtifactPub_->publish(modelArtifactMsg);
+
                 res->error = 0;
             }
         }
@@ -168,6 +181,9 @@
         std::unordered_map> preProcessList_;
         /// Reference to the node handler.
 
+        /// ROS publisher object to publish the name of a new model.
+        rclcpp::Publisher::SharedPtr modelArtifactPub_;
+
         /// Compute device type.
         std::string deviceName_;
         /// Inference Engine parameter.
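The publisher added in [PATCH 10/12] announces the path of each successfully loaded model on the `model_artifact` topic as a `std_msgs/msg/String`. Below is a minimal sketch of a client node that consumes that notification; the node name `model_artifact_listener` and the assumption that it resolves to the same namespace as the inference node are illustrative only, not part of the package.

    // Minimal sketch: listen for the model-artifact notification published by the
    // inference node. Node and topic namespaces are assumptions, not package code.
    #include "rclcpp/rclcpp.hpp"
    #include "std_msgs/msg/string.hpp"

    class ModelArtifactListener : public rclcpp::Node {
    public:
        ModelArtifactListener() : Node("model_artifact_listener") {
            sub_ = this->create_subscription<std_msgs::msg::String>(
                "model_artifact", 1,
                [this](std_msgs::msg::String::SharedPtr msg) {
                    // The inference node publishes the artifact path after loadModel() is called.
                    RCLCPP_INFO(this->get_logger(), "Model loaded: %s", msg->data.c_str());
                });
        }
    private:
        rclcpp::Subscription<std_msgs::msg::String>::SharedPtr sub_;
    };

    int main(int argc, char ** argv) {
        rclcpp::init(argc, argv);
        rclcpp::spin(std::make_shared<ModelArtifactListener>());
        rclcpp::shutdown();
        return 0;
    }

Note that the publisher in the patch is created with a history depth of 1 and the default volatile durability, so a subscriber has to be running before the `load_model` service is called in order to receive the notification.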
From e0169a4cb9fd253bf90d0a7e8bb6363bc2f64aef Mon Sep 17 00:00:00 2001 From: Lars Ludvigsen Date: Sun, 17 Mar 2024 14:57:35 +0000 Subject: [PATCH 11/12] Remove verbose logging --- inference_pkg/src/tflite_inference_eng.cpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/inference_pkg/src/tflite_inference_eng.cpp b/inference_pkg/src/tflite_inference_eng.cpp index a268e0b..be4727e 100644 --- a/inference_pkg/src/tflite_inference_eng.cpp +++ b/inference_pkg/src/tflite_inference_eng.cpp @@ -281,11 +281,7 @@ namespace TFLiteInferenceEngine { // Last dimension of output is number of classes auto nClasses = outputDimsArr_[0].back(); - auto * outputData = output_tensors_[0]->data.f; - for (auto i = 0; i < nClasses; ++i) { - std::cout << std::to_string(i) << ": " << outputData[i] << std::endl; - } auto inferMsg = deepracer_interfaces_pkg::msg::InferResultsArray(); for (size_t i = 0; i < msg->images.size(); ++i) { From 4a2d996d71d1ba7361c9fcce62667aed3071076c Mon Sep 17 00:00:00 2001 From: Lars Ludvigsen Date: Sat, 4 May 2024 18:34:24 +0000 Subject: [PATCH 12/12] Fix segfault issue --- inference_pkg/src/tflite_inference_eng.cpp | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/inference_pkg/src/tflite_inference_eng.cpp b/inference_pkg/src/tflite_inference_eng.cpp index be4727e..500ba65 100644 --- a/inference_pkg/src/tflite_inference_eng.cpp +++ b/inference_pkg/src/tflite_inference_eng.cpp @@ -177,11 +177,6 @@ namespace TFLiteInferenceEngine { // Set the image processing algorithms imgProcess_ = imgProcess; - // Clean up vectors - inputNamesArr_.clear(); - outputDimsArr_.clear(); - output_tensors_.clear(); - // Load the model try { @@ -209,7 +204,7 @@ namespace TFLiteInferenceEngine { {"channels", input_tensor->dims->data[0]}}; paramsArr_.push_back(params_); - RCLCPP_INFO(inferenceNode->get_logger(), "Input name: %s", interpreter_->GetInputName(i)); + RCLCPP_INFO(inferenceNode->get_logger(), "Input name: %s", input_tensor->name); RCLCPP_INFO(inferenceNode->get_logger(), "Input dimensions: %i x %i x %i", input_tensor->dims->data[2], input_tensor->dims->data[1], input_tensor->dims->data[0]); } @@ -223,7 +218,7 @@ namespace TFLiteInferenceEngine { output_tensor->dims->data, output_tensor->dims->data + output_tensor->dims->size, std::back_inserter(dims)); - RCLCPP_INFO(inferenceNode->get_logger(), "Output name: %s", interpreter_->GetOutputName(o)); + RCLCPP_INFO(inferenceNode->get_logger(), "Output name: %s", output_tensor->name); outputDimsArr_.push_back(dims); }
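Among other changes, [PATCH 12/12] replaces the `interpreter_->GetInputName(i)` and `interpreter_->GetOutputName(o)` lookups with the `name` field of the input and output tensors themselves. The sketch below shows that lookup pattern in isolation; the helper name `printTensorNames` and the standalone interpreter setup are illustrative assumptions, not code from the package.

    // Minimal sketch of reading tensor names/shapes from TfLiteTensor metadata.
    // The model path and function name are assumptions for illustration only.
    #include <iostream>
    #include <memory>
    #include <string>

    #include "tensorflow/lite/interpreter.h"
    #include "tensorflow/lite/kernels/register.h"
    #include "tensorflow/lite/model.h"

    void printTensorNames(const std::string & modelPath) {
        auto model = tflite::FlatBufferModel::BuildFromFile(modelPath.c_str());
        if (!model) {
            std::cerr << "Failed to load model: " << modelPath << std::endl;
            return;
        }
        tflite::ops::builtin::BuiltinOpResolver resolver;
        std::unique_ptr<tflite::Interpreter> interpreter;
        tflite::InterpreterBuilder(*model, resolver)(&interpreter);
        if (!interpreter || interpreter->AllocateTensors() != kTfLiteOk) {
            std::cerr << "Failed to build interpreter" << std::endl;
            return;
        }
        // Names and dimensions come straight from the TfLiteTensor objects, which
        // remain valid for the lifetime of the interpreter.
        for (int idx : interpreter->inputs()) {
            const TfLiteTensor * t = interpreter->tensor(idx);
            std::cout << "Input: " << (t->name ? t->name : "(unnamed)")
                      << ", rank: " << t->dims->size << std::endl;
        }
        for (int idx : interpreter->outputs()) {
            const TfLiteTensor * t = interpreter->tensor(idx);
            std::cout << "Output: " << (t->name ? t->name : "(unnamed)") << std::endl;
        }
    }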