diff --git a/src/backend_model_instance.h b/src/backend_model_instance.h
index 037b3deb2..4dee54f03 100644
--- a/src/backend_model_instance.h
+++ b/src/backend_model_instance.h
@@ -174,7 +174,6 @@ class TritonModelInstance {
     std::deque<TritonModelInstance*> model_instances_;
 
     std::thread backend_thread_;
-    std::atomic<bool> backend_thread_exit_;
   };
 
   struct WarmupData {
diff --git a/src/filesystem/implementations/common.h b/src/filesystem/implementations/common.h
index 54afb284b..29346583b 100644
--- a/src/filesystem/implementations/common.h
+++ b/src/filesystem/implementations/common.h
@@ -97,6 +97,7 @@ class FileSystem {
   virtual Status MakeTemporaryDirectory(
       std::string dir_path, std::string* temp_dir) = 0;
   virtual Status DeletePath(const std::string& path) = 0;
+  virtual ~FileSystem() = default;
 };
 
 // Helper function to take care of lack of trailing slashes
diff --git a/src/infer_request.cc b/src/infer_request.cc
index f5b3d3a30..17fe437f9 100644
--- a/src/infer_request.cc
+++ b/src/infer_request.cc
@@ -1644,8 +1644,8 @@ operator<<(std::ostream& out, const InferenceRequest::State& state)
 
 bool
 operator==(
-    const InferenceRequest::SequenceId lhs,
-    const InferenceRequest::SequenceId rhs)
+    const InferenceRequest::SequenceId& lhs,
+    const InferenceRequest::SequenceId& rhs)
 {
   if (lhs.Type() == rhs.Type()) {
     switch (lhs.Type()) {
@@ -1663,8 +1663,8 @@ operator==(
 
 bool
 operator!=(
-    const InferenceRequest::SequenceId lhs,
-    const InferenceRequest::SequenceId rhs)
+    const InferenceRequest::SequenceId& lhs,
+    const InferenceRequest::SequenceId& rhs)
 {
   return !(lhs == rhs);
 }
diff --git a/src/infer_request.h b/src/infer_request.h
index 3096cfcd9..e93664d1d 100644
--- a/src/infer_request.h
+++ b/src/infer_request.h
@@ -257,6 +257,7 @@ class InferenceRequest {
     SequenceId();
     SequenceId(const std::string& sequence_label);
     SequenceId(uint64_t sequence_index);
+    SequenceId(const SequenceId& other) = default;
     SequenceId& operator=(const SequenceId& rhs) = default;
     SequenceId& operator=(const std::string& rhs);
     SequenceId& operator=(const uint64_t rhs);
@@ -275,8 +276,8 @@ class InferenceRequest {
    private:
     friend std::ostream& operator<<(
         std::ostream& out, const InferenceRequest::SequenceId& correlation_id);
-    friend bool operator==(const SequenceId lhs, const SequenceId rhs);
-    friend bool operator!=(const SequenceId lhs, const SequenceId rhs);
+    friend bool operator==(const SequenceId& lhs, const SequenceId& rhs);
+    friend bool operator!=(const SequenceId& lhs, const SequenceId& rhs);
 
     std::string sequence_label_;
     uint64_t sequence_index_;
@@ -766,7 +767,6 @@ class InferenceRequest {
   // The model version as requested and based on version policy the
   // specific version that is actually used for inference.
   int64_t requested_model_version_;
-  int64_t actual_model_version_;
 
   std::string id_;
 
@@ -815,11 +815,6 @@ class InferenceRequest {
   uint64_t cache_lookup_start_ns_;
   uint64_t cache_lookup_end_ns_;
 
-  // Cache insertion start/end timestamps. Cache manages its own stats even
-  // when statistics are not being colleceted.
-  uint64_t cache_insertion_start_ns_;
-  uint64_t cache_insertion_end_ns_;
-
   // Dedicated timestamp for batcher internal which can diverge from
   // queue start timestamp to provide accurate queue time without affecting
   // batcher functionalities.
@@ -864,8 +859,8 @@ std::ostream& operator<<(
 std::ostream& operator<<(
     std::ostream& out, const InferenceRequest::SequenceId& sequence_id);
 bool operator==(
-    const InferenceRequest::SequenceId lhs,
-    const InferenceRequest::SequenceId rhs);
+    const InferenceRequest::SequenceId& lhs,
+    const InferenceRequest::SequenceId& rhs);
 }}  // namespace triton::core
 
 namespace std {
diff --git a/src/memory.h b/src/memory.h
index 65eb87177..bbcad78d6 100644
--- a/src/memory.h
+++ b/src/memory.h
@@ -66,6 +66,7 @@ class Memory {
   // Return the total byte size of the data buffer
   size_t TotalByteSize() const { return total_byte_size_; }
 
+  virtual ~Memory() = default;
  protected:
   Memory() : total_byte_size_(0), buffer_count_(0) {}
   size_t total_byte_size_;
diff --git a/src/model_repository_manager/model_repository_manager.cc b/src/model_repository_manager/model_repository_manager.cc
index 6a882e552..e07a6427e 100644
--- a/src/model_repository_manager/model_repository_manager.cc
+++ b/src/model_repository_manager/model_repository_manager.cc
@@ -373,7 +373,7 @@ ModelRepositoryManager::ModelRepositoryManager(
       return FindModelIdentifier(n, i);
     };
   } else {
-    find_identifier_fn_ = [this](const std::string& n, ModelIdentifier* i) {
+    find_identifier_fn_ = [](const std::string& n, ModelIdentifier* i) {
       return Status::Success;
     };
   }
diff --git a/src/sequence_batch_scheduler/sequence_batch_scheduler.cc b/src/sequence_batch_scheduler/sequence_batch_scheduler.cc
index 84361df10..9fa3c9cf5 100644
--- a/src/sequence_batch_scheduler/sequence_batch_scheduler.cc
+++ b/src/sequence_batch_scheduler/sequence_batch_scheduler.cc
@@ -1447,7 +1447,7 @@ DirectSequenceBatch::DirectSequenceBatch(
   const int nice = 0;
   NewPayload();
   scheduler_thread_.reset(
-      new std::thread([this, nice]() { BatcherThread(nice); }));
+      new std::thread([this]() { BatcherThread(nice); }));
 
   *is_initialized = true;
 }
diff --git a/src/sequence_batch_scheduler/sequence_batch_scheduler.h b/src/sequence_batch_scheduler/sequence_batch_scheduler.h
index 179544e28..ed0ccb1e2 100644
--- a/src/sequence_batch_scheduler/sequence_batch_scheduler.h
+++ b/src/sequence_batch_scheduler/sequence_batch_scheduler.h
@@ -91,6 +91,7 @@ class SequenceBatchScheduler : public Scheduler {
   struct BatcherSequenceSlot {
     BatcherSequenceSlot() = default;
     BatcherSequenceSlot(const BatcherSequenceSlot&) = default;
+    BatcherSequenceSlot& operator=(const BatcherSequenceSlot&) = default;
     BatcherSequenceSlot(TritonModelInstance* i, uint32_t s)
         : model_instance_(i), seq_slot_(s)
     {
diff --git a/src/sequence_batch_scheduler/sequence_utils.h b/src/sequence_batch_scheduler/sequence_utils.h
index 54ba4ad64..049d508af 100644
--- a/src/sequence_batch_scheduler/sequence_utils.h
+++ b/src/sequence_batch_scheduler/sequence_utils.h
@@ -68,6 +68,7 @@ class Sequencer {
     // Sequencer will not reschedule requests
     return Status::Success;
   }
+  virtual ~Sequencer() = default;
 };
 
 class IterativeSequencer : public Sequencer {
diff --git a/src/tritonserver.cc b/src/tritonserver.cc
index 272fd6cf1..c6cfb9acb 100644
--- a/src/tritonserver.cc
+++ b/src/tritonserver.cc
@@ -420,7 +420,7 @@ TritonServerOptions::AddRateLimiterResource(
   }
   auto ritr = ditr->second.find(name);
   if (ritr == ditr->second.end()) {
-    ditr->second.emplace(name, count);
+    ditr->second.emplace(name, count);
   } else {
     // If already present then store the minimum of the two.
    if (ritr->second > count) {
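Note on the destructor additions: the FileSystem, Memory, and Sequencer hunks all add a virtual destructor to an abstract base class. When a derived object is destroyed through a pointer to such a base, the destructor must be virtual, otherwise the derived destructor never runs and the behavior is undefined. A minimal standalone sketch of the pattern, using hypothetical Interface/Impl names that do not appear in the Triton sources:

#include <iostream>
#include <memory>

// Hypothetical stand-in for an abstract interface like FileSystem or Memory.
class Interface {
 public:
  virtual void DoWork() = 0;
  // Without this virtual destructor, deleting through an Interface* (or a
  // std::unique_ptr<Interface>) would skip Impl::~Impl(), which is undefined
  // behavior and leaks anything the derived class owns.
  virtual ~Interface() = default;
};

class Impl : public Interface {
 public:
  void DoWork() override { std::cout << "working\n"; }
  ~Impl() override { std::cout << "Impl cleaned up\n"; }
};

int main() {
  std::unique_ptr<Interface> obj = std::make_unique<Impl>();
  obj->DoWork();
  // When obj goes out of scope the virtual destructor dispatches to
  // Impl::~Impl(), so "Impl cleaned up" is printed before exit.
  return 0;
}

Owning these objects through a base-class pointer (for example a std::unique_ptr to the interface type) relies on exactly this virtual dispatch, which is presumably what the compiler warnings behind these hunks were pointing at.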