From 7c6e8d9d6d5f8c01e584539121144466ba63d09b Mon Sep 17 00:00:00 2001 From: Yang Jun 01 Date: Fri, 9 Jul 2021 15:28:43 +0800 Subject: [PATCH 01/34] Add code skeleton for new persistent-aware self_relative_ptr and swmr_map --- .../experimental/pa_self_relative_ptr.hpp | 739 ++++ .../libpmemobj++/experimental/swmr_map.hpp | 137 + .../experimental/swmr_skip_list_impl.hpp | 3157 +++++++++++++++++ tests/CMakeLists.txt | 3 + tests/ptr/pa_self_relative_ptr.cpp | 164 + 5 files changed, 4200 insertions(+) create mode 100644 include/libpmemobj++/experimental/pa_self_relative_ptr.hpp create mode 100644 include/libpmemobj++/experimental/swmr_map.hpp create mode 100644 include/libpmemobj++/experimental/swmr_skip_list_impl.hpp create mode 100644 tests/ptr/pa_self_relative_ptr.cpp diff --git a/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp b/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp new file mode 100644 index 0000000000..73b5c019f8 --- /dev/null +++ b/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp @@ -0,0 +1,739 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright 2021, 4Paradigm Inc. */ + +#ifndef LIBPMEMOBJ_CPP_PA_SELF_RELATIVE_PTR_HPP +#define LIBPMEMOBJ_CPP_PA_SELF_RELATIVE_PTR_HPP + +#include +#include +#include +#include + + +/* According to the definition of offset=real_offset-1, for 8-byte aligned + * allocation, the lower 3 bits of the stored offset are always 1 (except + * null_ptr). Therefore, the second lowest bit is used as the indicator of if + * the data pointed by the pa_self_relative_ptr + * (persistent-aware self_relative_ptr) needs explicit flush. + * Flush is needed if it is 0, not needed if it is 1. + * */ + +#define kFlushNeeded ~(1UL << 1) +// flag &= kFlushNeeded, to indicate it needs flush +#define FlushNeeded(offset) (!((offset >> 1) & 1U)) +// return true if needs explicit flush, false otherwise. 
+ +namespace pmem +{ +namespace obj +{ +namespace experimental +{ + +template +class pa_self_relative_ptr; + +template <> +class pa_self_relative_ptr : public self_relative_ptr_base { +public: + using base_type = self_relative_ptr_base; + using this_type = pa_self_relative_ptr; + using element_type = void; + + constexpr pa_self_relative_ptr() noexcept = default; + + constexpr pa_self_relative_ptr(std::nullptr_t) noexcept + : self_relative_ptr_base() + { + } + + pa_self_relative_ptr(element_type *ptr) noexcept + : self_relative_ptr_base(self_offset(ptr)) + { + } + + inline element_type * + get() const noexcept + { + return static_cast( + self_relative_ptr_base::to_void_pointer()); + } + +private: + difference_type + self_offset(element_type *ptr) const noexcept + { + return base_type::pointer_to_offset(static_cast(ptr)); + } +}; + +template +class pa_self_relative_ptr : public self_relative_ptr_base { +public: + using base_type = self_relative_ptr_base; + using this_type = pa_self_relative_ptr; + using element_type = typename pmem::detail::sp_element::type; + + /** + * Random access iterator requirements (members) + */ + + /** + * The self_relative_ptr iterator category. + */ + using iterator_category = std::random_access_iterator_tag; + + /** + * The self_relative_ptr difference type. + */ + using difference_type = typename base_type::difference_type; + + /** + * The type of the value pointed to by the self_relative_ptr. + */ + using value_type = T; + + /** + * The reference type of the value pointed to by the self_relative_ptr. + */ + using reference = T &; + + /* + * Constructors + */ + /** + * Default constructor, equal the nullptr + */ + constexpr pa_self_relative_ptr() noexcept = default; + /** + * Volatile pointer constructor. + * + * @param ptr volatile pointer, pointing to persistent memory. 
+ */ + pa_self_relative_ptr(element_type *ptr, bool flushNeeded = true) noexcept + : base_type(self_offset(ptr)) + { + uintptr_t mask = (flushNeeded == true); + --mask; + this->offset &= (mask | kFlushNeeded); + } + + /** + * Constructor from persistent_ptr + */ + pa_self_relative_ptr(persistent_ptr ptr, bool flushNeeded = true) noexcept + : base_type(self_offset(ptr.get())) + { + uintptr_t mask = (flushNeeded == true); + --mask; + this->offset &= (mask | kFlushNeeded); + } + + /** + * PMEMoid constructor. + * + * Provided for easy interoperability between C++ and C API's. + * + * @param oid C-style persistent pointer + */ + pa_self_relative_ptr(PMEMoid oid, bool flushNeeded = true) noexcept + : base_type(self_offset( + static_cast(pmemobj_direct(oid)))) + { + uintptr_t mask = (flushNeeded == true); + --mask; + this->offset &= (mask | kFlushNeeded); + } + + /** + * Copy constructor + */ + pa_self_relative_ptr(const pa_self_relative_ptr &ptr) noexcept + : base_type(ptr) + { + } + + /** + * Copy constructor from a different pa_self_relative_ptr<>. + * + * Available only for convertible, non-void types. + */ + template < + typename U, + typename = typename std::enable_if< + !std::is_same< + typename std::remove_cv::type, + typename std::remove_cv::type>::value && + !std::is_void::value, + decltype(static_cast(std::declval()))>::type> + pa_self_relative_ptr(pa_self_relative_ptr const &r) noexcept + : base_type(self_offset(static_cast(r.get()))) + { + } + + ~pa_self_relative_ptr() + { + verify_type(); + } + + /** + * Get the direct pointer. + * + * @return the direct pointer to the object. 
+ */ + inline element_type * + get() const noexcept + { + return static_cast(base_type::offset_to_pointer( + this->offset | ~kFlushNeeded)); + } + + /** + * Conversion to persitent ptr + */ + persistent_ptr + to_persistent_ptr() const + { + return persistent_ptr{this->get()}; + } + + /** + * Check if flush is needed + */ + bool + flush_needed() const + { + return FlushNeeded(this->offset); + } + /** + * return offset for debug only + */ + offset_type + get_offset() const + { + return this->offset; + } + /* + * Operators + */ + + /** + * Bool conversion operator. + */ + explicit operator bool() const noexcept + { + return !is_null(); + } + + /** + * Conversion operator to persistent_ptr + */ + operator persistent_ptr() const + { + return to_persistent_ptr(); + } + + /** + * Dereference operator. + */ + typename pmem::detail::sp_dereference::type + operator*() const noexcept + { + return *(this->get()); + } + + /** + * Member access operator. + */ + typename pmem::detail::sp_member_access::type + operator->() const noexcept + { + return this->get(); + } + + /** + * Array access operator. + * + * Contains run-time bounds checking for static arrays. + */ + template ::value>> + typename pmem::detail::sp_array_access::type + operator[](difference_type i) const noexcept + { + assert(i >= 0 && + (i < pmem::detail::sp_extent::value || + pmem::detail::sp_extent::value == 0) && + "persistent array index out of bounds"); + + return this->get()[i]; + } + + /** + * Assignment operator. + * + * self-relative pointer assignment within a transaction + * automatically registers this operation so that a rollback + * is possible. + * + * @throw pmem::transaction_error when adding the object to the + * transaction failed. + */ + pa_self_relative_ptr & + operator=(const pa_self_relative_ptr &r) + { + this->base_type::operator=(r); + return *this; + } + + /** + * Converting assignment operator from a different + * pa_self_relative_ptr<>. + * + * Available only for convertible types. 
+ * Just like regular assignment, also automatically registers + * itself in a transaction. + * + * @throw pmem::transaction_error when adding the object + * to the transaction failed. + */ + template ::value>::type> + pa_self_relative_ptr & + operator=(pa_self_relative_ptr const &r) + { + this_type(r).swap(*this); + return *this; + } + + /** + * Nullptr move assignment operator. + * + * @throw pmem::transaction_error when adding the object to the + * transaction failed. + */ + pa_self_relative_ptr &operator=(std::nullptr_t) + { + detail::conditional_add_to_tx(this); + this->offset = self_offset(nullptr); + return *this; + } + + /** + * Prefix increment operator. + */ + inline pa_self_relative_ptr & + operator++() + { + detail::conditional_add_to_tx(this); + uintptr_t mask = (this->flush_needed() == true); + --mask; + this->offset = (mask | kFlushNeeded) & + ((this->offset | ~kFlushNeeded) + + static_cast(sizeof(T))); + return *this; + } + + /** + * Postfix increment operator. + */ + inline pa_self_relative_ptr + operator++(int) + { + auto copy = *this; + ++(*this); + + return copy; + } + + /** + * Prefix decrement operator. + */ + inline pa_self_relative_ptr & + operator--() + { + detail::conditional_add_to_tx(this); + uintptr_t mask = (this->flush_needed() == true); + --mask; + this->offset = (mask | kFlushNeeded) & + ((this->offset | ~kFlushNeeded) + - static_cast(sizeof(T))); + return *this; + } + + /** + * Postfix decrement operator. + */ + inline pa_self_relative_ptr + operator--(int) + { + auto copy = *this; + --(*this); + + return copy; + } + + /** + * Addition assignment operator. + */ + inline pa_self_relative_ptr & + operator+=(std::ptrdiff_t s) + { + detail::conditional_add_to_tx(this); + uintptr_t mask = (this->flush_needed() == true); + --mask; + this->offset = (mask | kFlushNeeded) & + ((this->offset | ~kFlushNeeded) + + s * static_cast(sizeof(T))); + return *this; + } + + /** + * Subtraction assignment operator. 
+ */ + inline pa_self_relative_ptr & + operator-=(std::ptrdiff_t s) + { + detail::conditional_add_to_tx(this); + uintptr_t mask = (this->flush_needed() == true); + --mask; + this->offset = (mask | kFlushNeeded) & + ((this->offset | ~kFlushNeeded) + - s * static_cast(sizeof(T))); + return *this; + } + + /** + * Conversion to byte pointer + */ + byte_ptr_type + to_byte_pointer() const noexcept + { + return static_cast(this->to_void_pointer()); + } + + /** + * Conversion to void* + */ + void * + to_void_pointer() const noexcept + { + return base_type::offset_to_pointer(this->offset | + ~kFlushNeeded); + } + + /** + * Explicit conversion operator to void* + */ + explicit + operator void *() const noexcept + { + return to_void_pointer(); + } + + /** + * Explicit conversion operator to byte pointer + */ + explicit operator byte_ptr_type() const noexcept + { + return to_byte_pointer(); + } + + /** + * Byte distance between two relative pointers + */ + static difference_type + distance_between(const pa_self_relative_ptr &first, + const pa_self_relative_ptr &second) + { + return second.to_byte_pointer() - first.to_byte_pointer(); + } +protected: + /** + * Verify if element_type is not polymorphic + */ + void + verify_type() + { + static_assert(!std::is_polymorphic::value, + "Polymorphic types are not supported"); + } + +private: + difference_type + self_offset(element_type *ptr) const noexcept + { + return base_type::pointer_to_offset(static_cast(ptr)); + } +}; + +/** + * Swaps two pa_self_relative_ptr objects of the same type. + * + * Non-member swap function as required by Swappable concept. + * en.cppreference.com/w/cpp/concept/Swappable + */ +template +inline void +swap(pa_self_relative_ptr &a, pa_self_relative_ptr &b) +{ + a.swap(b); +} + +/** + * Equality operator. 
+ */ +template +inline bool +operator==(pa_self_relative_ptr const &lhs, + pa_self_relative_ptr const &rhs) noexcept +{ + return lhs.to_byte_pointer() == rhs.to_byte_pointer(); +} + +/** + * Inequality operator. + */ +template +inline bool +operator!=(pa_self_relative_ptr const &lhs, + pa_self_relative_ptr const &rhs) noexcept +{ + return !(lhs == rhs); +} + +/** + * Equality operator with nullptr. + */ +template +inline bool +operator==(pa_self_relative_ptr const &lhs, std::nullptr_t) noexcept +{ + return !bool(lhs); +} + +/** + * Equality operator with nullptr. + */ +template +inline bool +operator==(std::nullptr_t, pa_self_relative_ptr const &lhs) noexcept +{ + return !bool(lhs); +} + +/** + * Inequality operator with nullptr. + */ +template +inline bool +operator!=(pa_self_relative_ptr const &lhs, std::nullptr_t) noexcept +{ + return bool(lhs); +} + +/** + * Inequality operator with nullptr. + */ +template +inline bool +operator!=(std::nullptr_t, pa_self_relative_ptr const &lhs) noexcept +{ + return bool(lhs); +} + +/** + * Less than operator. + * + * @return true if the sum(this, offset) of lhs is less than the sum(this, + * offset) of rhs. Returns false otherwise. + */ +template +inline bool +operator<(pa_self_relative_ptr const &lhs, + pa_self_relative_ptr const &rhs) noexcept +{ + return lhs.to_byte_pointer() < rhs.to_byte_pointer(); +} + +/** + * Less or equal than operator. + * + * See less than operator for comparison rules. + */ +template +inline bool +operator<=(pa_self_relative_ptr const &lhs, + pa_self_relative_ptr const &rhs) noexcept +{ + return !(rhs < lhs); +} + +/** + * Greater than operator. + * + * See less than operator for comparison rules. + */ +template +inline bool +operator>(pa_self_relative_ptr const &lhs, + pa_self_relative_ptr const &rhs) noexcept +{ + return (rhs < lhs); +} + +/** + * Greater or equal than operator. + * + * See less than operator for comparison rules. 
+ */ +template +inline bool +operator>=(pa_self_relative_ptr const &lhs, + pa_self_relative_ptr const &rhs) noexcept +{ + return !(lhs < rhs); +} + +/* nullptr comparisons */ + +/** + * Compare a pa_self_relative_ptr with a null pointer. + */ +template +inline bool +operator<(pa_self_relative_ptr const &lhs, std::nullptr_t) noexcept +{ + return std::less::element_type *>()( + lhs.get(), nullptr); +} + +/** + * Compare a pa_self_relative_ptr with a null pointer. + */ +template +inline bool +operator<(std::nullptr_t, pa_self_relative_ptr const &rhs) noexcept +{ + return std::less::element_type *>()( + nullptr, rhs.get()); +} + +/** + * Compare a pa_self_relative_ptr with a null pointer. + */ +template +inline bool +operator<=(pa_self_relative_ptr const &lhs, std::nullptr_t) noexcept +{ + return !(nullptr < lhs); +} + +/** + * Compare a pa_self_relative_ptr with a null pointer. + */ +template +inline bool +operator<=(std::nullptr_t, pa_self_relative_ptr const &rhs) noexcept +{ + return !(rhs < nullptr); +} + +/** + * Compare a pa_self_relative_ptr with a null pointer. + */ +template +inline bool +operator>(pa_self_relative_ptr const &lhs, std::nullptr_t) noexcept +{ + return nullptr < lhs; +} + +/** + * Compare a pa_self_relative_ptr with a null pointer. + */ +template +inline bool +operator>(std::nullptr_t, pa_self_relative_ptr const &rhs) noexcept +{ + return rhs < nullptr; +} + +/** + * Compare a pa_self_relative_ptr with a null pointer. + */ +template +inline bool +operator>=(pa_self_relative_ptr const &lhs, std::nullptr_t) noexcept +{ + return !(lhs < nullptr); +} + +/** + * Compare a pa_self_relative_ptr with a null pointer. + */ +template +inline bool +operator>=(std::nullptr_t, pa_self_relative_ptr const &rhs) noexcept +{ + return !(nullptr < rhs); +} + +/** + * Addition operator for self-relative pointers. 
+ */ +template +inline pa_self_relative_ptr +operator+(pa_self_relative_ptr const &lhs, std::ptrdiff_t s) +{ + pa_self_relative_ptr ptr = lhs; + ptr += s; + return ptr; +} + +/** + * Subtraction operator for self-relative pointers. + */ +template +inline pa_self_relative_ptr +operator-(pa_self_relative_ptr const &lhs, std::ptrdiff_t s) +{ + pa_self_relative_ptr ptr = lhs; + ptr -= s; + return ptr; +} + +/** + * Subtraction operator for self-relative pointers of identical type. + * + * Calculates the offset difference. + * Calculating the difference of pointers from objects of + * different pools is not allowed. + */ +template ::type, + typename std::remove_cv::type>::value>> +inline ptrdiff_t +operator-(pa_self_relative_ptr const &lhs, + pa_self_relative_ptr const &rhs) +{ + return self_relative_ptr_base::distance_between(rhs, lhs) / + static_cast(sizeof(T)); +} + +/** + * Ostream operator + */ +template +std::ostream & +operator<<(std::ostream &os, pa_self_relative_ptr const &ptr) +{ + os << ptr.to_void_pointer(); + return os; +} + +} +} +} +#endif // LIBPMEMOBJ_CPP_PA_SELF_RELATIVE_PTR_HPP diff --git a/include/libpmemobj++/experimental/swmr_map.hpp b/include/libpmemobj++/experimental/swmr_map.hpp new file mode 100644 index 0000000000..9fa20f87c4 --- /dev/null +++ b/include/libpmemobj++/experimental/swmr_map.hpp @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright 2021, 4Paradigm Inc. 
*/ + +#ifndef PMEMOBJ_SWMR_MAP_HPP +#define PMEMOBJ_SWMR_MAP_HPP + +#include +#include +#include + +namespace pmem +{ +namespace obj +{ +namespace experimental +{ +/** + * + */ +template , + typename Allocator = + pmem::obj::allocator>> +class swmr_map + : public detail::swmr_skip_list> { + using traits_type = detail::map_traits; + using base_type = pmem::detail::swmr_skip_list; + +public: + using key_type = typename base_type::key_type; + using mapped_type = typename base_type::mapped_type; + using value_type = typename base_type::value_type; + using size_type = typename base_type::size_type; + using difference_type = typename base_type::difference_type; + using key_compare = Comp; + using allocator_type = Allocator; + using reference = typename base_type::reference; + using const_reference = typename base_type::const_reference; + using pointer = typename base_type::pointer; + using const_pointer = typename base_type::const_pointer; + using iterator = typename base_type::iterator; + using const_iterator = typename base_type::const_iterator; + + /** + * Default constructor. + */ + swmr_map() = default; + + /** + * Copy constructor. + */ + swmr_map(const swmr_map &table) : base_type(table) + { + } + + /** + * Move constructor. + */ + swmr_map(swmr_map &&table) : base_type(std::move(table)) + { + } + + /** + * Construct the empty map + */ + explicit swmr_map(const key_compare &comp, + const allocator_type &alloc = allocator_type()) + : base_type(comp, alloc) + { + } + + /** + * Constructs the map with the contents of the range [first, last). 
+ */ + template + swmr_map(InputIt first, InputIt last, + const key_compare &comp = Comp(), + const allocator_type &alloc = allocator_type()) + : base_type(first, last, comp, alloc) + { + } + + /** + * Constructs the map with initializer list + */ + swmr_map(std::initializer_list ilist) + : base_type(ilist.begin(), ilist.end()) + { + } + + /** + * Assignment operator + */ + swmr_map & + operator=(const swmr_map &other) + { + return static_cast( + base_type::operator=(other)); + } + + /** + * Move-assignment operator + */ + swmr_map & + operator=(swmr_map &&other) + { + return static_cast( + base_type::operator=(std::move(other))); + } + + /** + * + */ + swmr_map & + operator=(std::initializer_list ilist) + { + return static_cast( + base_type::operator=(ilist)); + } +}; + +/** Non-member swap */ +template +void +swap(swmr_map &lhs, + swmr_map &rhs) +{ + lhs.swap(rhs); +} + +} /* namespace experimental */ +} /* namespace obj */ +} /* namespace pmem */ +#endif /* PMEMOBJ_SWMR_MAP_HPP */ diff --git a/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp b/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp new file mode 100644 index 0000000000..9b985d8445 --- /dev/null +++ b/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp @@ -0,0 +1,3157 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright 2021, 4Paradigm Inc. 
*/ + +#ifndef PMEMOBJ_SWMR_SKIP_LIST_IMPL_HPP +#define PMEMOBJ_SWMR_SKIP_LIST_IMPL_HPP + +#include +#include +#include +#include +#include +#include /* for std::unique_lock */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +namespace pmem +{ +namespace detail +{ + +template > +class swmr_skip_list_node { +public: + using value_type = Value; + using size_type = std::size_t; + using reference = value_type &; + using const_reference = const value_type &; + using pointer = value_type *; + using const_pointer = const value_type *; + using node_pointer = + obj::experimental::pa_self_relative_ptr; + using atomic_node_pointer = std::atomic; + using mutex_type = Mutex; + using lock_type = LockType; + + swmr_skip_list_node(size_type levels) : height_(levels) + { + for (size_type lev = 0; lev < height_; ++lev) + detail::create(&get_next(lev), + nullptr); + + assert(height() == levels); +#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED + /* + * Valgrind does not understand atomic semantic and reports + * false-postives in drd and helgrind tools. + */ + for (size_type lev = 0; lev < height_; ++lev) { + VALGRIND_HG_DISABLE_CHECKING(&get_next(lev), + sizeof(get_next(lev))); + } +#endif + } + + swmr_skip_list_node(size_type levels, const node_pointer *new_nexts) + : height_(levels) + { + for (size_type lev = 0; lev < height_; ++lev) + detail::create(&get_next(lev), + new_nexts[lev]); + + assert(height() == levels); +#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED + /* + * Valgrind does not understand atomic semantic and reports + * false-postives in drd and helgrind tools. 
+ */ + for (size_type lev = 0; lev < height_; ++lev) { + VALGRIND_HG_DISABLE_CHECKING(&get_next(lev), + sizeof(get_next(lev))); + } +#endif + } + + ~swmr_skip_list_node() + { + for (size_type lev = 0; lev < height_; ++lev) + detail::destroy(get_next(lev)); + } + + swmr_skip_list_node(const swmr_skip_list_node &) = delete; + + swmr_skip_list_node &operator=(const swmr_skip_list_node &) = delete; + + pointer + get() noexcept + { + return &val; + } + + const_pointer + get() const noexcept + { + return &val; + } + + reference + value() + { + return *get(); + } + + node_pointer + next(size_type level) const + { + assert(level < height()); + return get_next(level).load(std::memory_order_acquire); + } + + /** + * Can`t be called concurrently + * Should be called inside a transaction + */ + void + set_next_tx(size_type level, node_pointer next) + { + assert(level < height()); + assert(pmemobj_tx_stage() == TX_STAGE_WORK); + auto &node = get_next(level); + obj::flat_transaction::snapshot(&node); + node.store(next, std::memory_order_release); + } + + void + set_next(obj::pool_base pop, size_type level, node_pointer next) + { + assert(level < height()); + auto &node = get_next(level); + node.store(next, std::memory_order_release); + pop.persist(&node, sizeof(node)); + } + + void + set_nexts(const node_pointer *new_nexts, size_type h) + { + assert(h == height()); + auto *nexts = get_nexts(); + + for (size_type i = 0; i < h; i++) { + nexts[i].store(new_nexts[i], std::memory_order_relaxed); + } + } + + void + set_nexts(obj::pool_base pop, const node_pointer *new_nexts, + size_type h) + { + set_nexts(new_nexts, h); + + auto *nexts = get_nexts(); + pop.persist(nexts, sizeof(nexts[0]) * h); + } + + /** @return number of layers */ + size_type + height() const + { + return height_; + } + + lock_type + acquire() + { + return lock_type(mutex); + } + +private: + atomic_node_pointer * + get_nexts() + { + return reinterpret_cast(this + 1); + } + + atomic_node_pointer & + 
get_next(size_type level) + { + auto *arr = get_nexts(); + return arr[level]; + } + + const atomic_node_pointer & + get_next(size_type level) const + { + auto *arr = + reinterpret_cast(this + 1); + return arr[level]; + } + + mutex_type mutex; + union { + value_type val; + }; + size_type height_; +}; + +template +class swmr_skip_list_iterator { + using node_type = NodeType; + using node_ptr = typename std::conditional::type; + friend class swmr_skip_list_iterator; + +public: + using value_type = typename node_type::value_type; + using iterator_category = std::forward_iterator_tag; + using difference_type = std::ptrdiff_t; + using reference = + typename std::conditional::type; + using pointer = typename std::conditional::type; + + swmr_skip_list_iterator() : node(nullptr) + { + } + + /** Copy constructor. */ + swmr_skip_list_iterator(const swmr_skip_list_iterator &other) : node(other.node) + { + } + + /** Copy constructor for const iterator from non-const iterator */ + template ::type> + swmr_skip_list_iterator(const swmr_skip_list_iterator &other) + : node(other.node) + { + } + + reference operator*() const + { + return *(node->get()); + } + + pointer operator->() const + { + return node->get(); + } + + swmr_skip_list_iterator & + operator++() + { + assert(node != nullptr); + node = node->next(0).get(); + return *this; + } + + swmr_skip_list_iterator + operator++(int) + { + swmr_skip_list_iterator tmp = *this; + ++*this; + return tmp; + } + + swmr_skip_list_iterator & + operator=(const swmr_skip_list_iterator &other) + { + node = other.node; + return *this; + } + +private: + explicit swmr_skip_list_iterator(node_type *n) : node(n) + { + } + + template ::type> + explicit swmr_skip_list_iterator(const node_type *n) : node(n) + { + } + + node_ptr node; + + template + friend class concurrent_skip_list; + + template + friend bool operator==(const swmr_skip_list_iterator &lhs, + const swmr_skip_list_iterator &rhs); + + template + friend bool operator!=(const 
swmr_skip_list_iterator &lhs, + const swmr_skip_list_iterator &rhs); +}; + +template +bool +operator==(const swmr_skip_list_iterator &lhs, + const swmr_skip_list_iterator &rhs) +{ + return lhs.node == rhs.node; +} + +template +bool +operator!=(const swmr_skip_list_iterator &lhs, + const swmr_skip_list_iterator &rhs) +{ + return lhs.node != rhs.node; +} + +/** + * Persistent memory aware implementation of the concurrent skip list. The + * implementation is based on the lock-based concurrent skip list algorithm + * described in + * https://www.cs.tau.ac.il/~shanir/nir-pubs-web/Papers/OPODIS2006-BA.pdf. + * + * Our concurrent skip list implementation supports concurrent insertion and + * traversal, but not concurrent erasure. The erase method is prefixed with + * unsafe_, to indicate that there is no concurrency safety. + * + * Each time, the pool with swmr_skip_list is being opened, the + * swmr_skip_list requires runtime_initialize() to be called in order to + * restore the state after process restart. + * + * Traits template parameter allows to specify properties of the + * concurrent_ski_list. The Traits type should has the following member types: + * * key_type - type of the key + * * mapped_type - type of the mapped_value + * * value_type - type of the value stored inside the skip list node (e.g. + * pair). + * * compare_type - The comparison functor used to sort elements in the skip + * list. + * * allocator_type - The type of allocator used by the skip list. + * * max_level - The constant value which specify the number of layers in the + * skip list. + * * random_generator_type - The type of random generator used by the skip list. + * It should be thread-safe. 
+ */ +template +class swmr_skip_list { +protected: + using traits_type = Traits; + using key_type = typename traits_type::key_type; + using mapped_type = typename traits_type::mapped_type; + using value_type = typename traits_type::value_type; + using size_type = std::size_t; + using difference_type = std::ptrdiff_t; + using key_compare = typename traits_type::compare_type; + using allocator_type = typename traits_type::allocator_type; + using allocator_traits_type = std::allocator_traits; + + using reference = value_type &; + using const_reference = const value_type &; + using pointer = typename allocator_traits_type::pointer; + using const_pointer = typename allocator_traits_type::const_pointer; + + using list_node_type = swmr_skip_list_node; + + using iterator = swmr_skip_list_iterator; + using const_iterator = swmr_skip_list_iterator; + + static constexpr size_type MAX_LEVEL = traits_type::max_level; + + using random_level_generator_type = geometric_level_generator< + typename traits_type::random_generator_type, MAX_LEVEL>; + using node_allocator_type = typename std::allocator_traits< + allocator_type>::template rebind_alloc; + using node_allocator_traits = typename std::allocator_traits< + allocator_type>::template rebind_traits; + using node_ptr = list_node_type *; + using const_node_ptr = const list_node_type *; + using persistent_node_ptr = + obj::experimental::pa_self_relative_ptr; + + using prev_array_type = std::array; + using next_array_type = std::array; + using node_lock_type = typename list_node_type::lock_type; + using lock_array = std::array; + +public: + static constexpr bool allow_multimapping = + traits_type::allow_multimapping; + + /** + * Default constructor. Construct empty skip list. + * + * @pre must be called in transaction scope. + * @throw pmem::pool_error if an object is not in persistent memory. + * @throw pmem::transaction_scope_error if constructor wasn't called in + * transaction. 
+ */ + swmr_skip_list() + { + check_tx_stage_work(); + init(); + } + + /** + * Constructs an empty container. + * + * @param[in] comp comparison function object to use for all comparisons + * of keys. + * @param[in] alloc allocator to use for all memory allocations of this + * container. + * + * @pre must be called in transaction scope. + * + * @throw pmem::pool_error if an object is not in persistent memory. + * @throw pmem::transaction_scope_error if constructor wasn't called in + * transaction. + * @throw pmem::transaction_alloc_error when allocating memory for + * inserted elements in transaction failed. + */ + explicit swmr_skip_list( + const key_compare &comp, + const allocator_type &alloc = allocator_type()) + : _node_allocator(alloc), _compare(comp) + { + check_tx_stage_work(); + init(); + } + + /** + * Constructs the container with the contents of the range [first, + * last). If multiple elements in the range have keys that compare + * equivalent, the first element is inserted. + * + * @param[in] first first iterator of inserted range. + * @param[in] last last iterator of inserted range. + * @param[in] comp comparison function object to use for all comparisons + * of keys. + * @param[in] alloc allocator to use for all memory allocations of this + * container. + * + * InputIt must meet the requirements of LegacyInputIterator. + * + * @pre must be called in transaction scope. + * + * @throw pmem::pool_error if an object is not in persistent memory. + * @throw pmem::transaction_scope_error if constructor wasn't called in + * transaction. + * @throw pmem::transaction_alloc_error when allocating memory for + * inserted elements in transaction failed. + * @throw rethrows element constructor exception. 
+ */ + template + swmr_skip_list(InputIt first, InputIt last, + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) + : _node_allocator(alloc), _compare(comp) + { + check_tx_stage_work(); + init(); + while (first != last) + internal_unsafe_emplace(*first++); + } + + /** + * Copy constructor. Constructs the container with the copy of the + * contents of other. + * + * @param[in] other reference to the swmr_skip_list to be copied. + * + * @pre must be called in transaction scope. + * + * @post size() == other.size() + * + * @throw pmem::pool_error if an object is not in persistent memory. + * @throw pmem::transaction_alloc_error when allocating memory for + * copied elements in transaction failed. + * @throw pmem::transaction_scope_error if constructor wasn't called in + * transaction. + * @throw rethrows element constructor exception. + */ + swmr_skip_list(const swmr_skip_list &other) + : _node_allocator(node_allocator_traits:: + select_on_container_copy_construction( + other._node_allocator)), + _compare(other._compare), + _rnd_generator(other._rnd_generator) + { + check_tx_stage_work(); + init(); + internal_copy(other); + assert(_size == other._size); + } + + /** + * Copy constructor. Constructs the container with the copy of the + * contents of other. + * + * @param[in] other reference to the swmr_skip_list to be copied. + * @param[in] alloc allocator to use for all memory allocations of this + * container. + * + * @pre must be called in transaction scope. + * + * @post size() == other.size() + * + * @throw pmem::pool_error if an object is not in persistent memory. + * @throw pmem::transaction_alloc_error when allocating memory for + * copied elements in transaction failed. + * @throw pmem::transaction_scope_error if constructor wasn't called in + * transaction. + * @throw rethrows element constructor exception. 
+ */ + swmr_skip_list(const swmr_skip_list &other, + const allocator_type &alloc) + : _node_allocator(alloc), + _compare(other._compare), + _rnd_generator(other._rnd_generator) + { + check_tx_stage_work(); + init(); + internal_copy(other); + assert(_size == other._size); + } + + /** + * Move constructor. Constructs the container with the contents of other + * using move semantics. Allocator is obtained by move-construction from + * the allocator belonging to other + * + * @param[in] other reference to the swmr_skip_list to be copied. + * + * @pre must be called in transaction scope. + * + * @post size() == other.size() + * + * @throw pmem::pool_error if an object is not in persistent memory. + * @throw pmem::transaction_alloc_error when allocating memory for + * copied elements in transaction failed. + * @throw pmem::transaction_scope_error if constructor wasn't called in + * transaction. + * @throw rethrows element constructor exception. + */ + swmr_skip_list(swmr_skip_list &&other) + : _node_allocator(std::move(other._node_allocator)), + _compare(other._compare), + _rnd_generator(other._rnd_generator) + { + check_tx_stage_work(); + init(); + internal_move(std::move(other)); + } + + /** + * Move constructor. Constructs the container with the contents of other + * using move semantics. + * + * @param[in] other reference to the swmr_skip_list to be copied. + * @param[in] alloc allocator to use for all memory allocations of this + * container. + * + * @pre must be called in transaction scope. + * + * @post size() == other.size() + * + * @throw pmem::pool_error if an object is not in persistent memory. + * @throw pmem::transaction_alloc_error when allocating memory for + * copied elements in transaction failed. + * @throw pmem::transaction_scope_error if constructor wasn't called in + * transaction. + * @throw rethrows element constructor exception. 
+ */ + swmr_skip_list(swmr_skip_list &&other, + const allocator_type &alloc) + : _node_allocator(alloc), + _compare(other._compare), + _rnd_generator(other._rnd_generator) + { + check_tx_stage_work(); + init(); + if (alloc == other.get_allocator()) { + internal_move(std::move(other)); + } else { + init(); + internal_copy(std::make_move_iterator(other.begin()), + std::make_move_iterator(other.end())); + } + } + + /** + * Initialize swmr_skip_list after process restart. + * MUST be called every time after process restart. + * Not thread safe. + * + */ + void + runtime_initialize() + { + tls_restore(); + + assert(this->size() == + size_type(std::distance(this->begin(), this->end()))); + } + + /** + * Should be called before swmr_skip_list destructor is called. + * Otherwise, program can terminate if an exception occurs while freeing + * memory inside dtor. + * + * The skip list map can NOT be used after free_data() was called + * (unless it was called in a transaction and that transaction aborted). + * + * @throw std::transaction_error in case of PMDK transaction failure + * @throw pmem::transaction_free_error when freeing underlying memory + * failed. + */ + void + free_data() + { + if (dummy_head == nullptr) + return; + + auto pop = get_pool_base(); + obj::flat_transaction::run(pop, [&] { + clear(); + delete_dummy_head(); + }); + } + + /** + * Destructor. + * free_data should be called before swmr_skip_list + * destructor is called. Otherwise, program can terminate if + * an exception occurs while freeing memory inside dtor. + * + * The skip list map can NOT be used after free_data() was called + * (unless it was called in a transaction and that transaction aborted). + */ + ~swmr_skip_list() + { + try { + free_data(); + } catch (...) { + std::terminate(); + } + } + + /** + * Copy assignment operator. Replaces the contents with a copy of the + * contents of other transactionally. 
If + * std::allocator_traits::propagate_on_container_copy_assignment::value + * is true, the target allocator is replaced by a copy of the source + * allocator. + * + * @post size() == other.size() + * + * @throw pmem::transaction_alloc_error when allocating new memory + * failed. + * @throw pmem::transaction_free_error when freeing old existing + * elements failed. + * @throw rethrows constructor exception. + */ + swmr_skip_list & + operator=(const swmr_skip_list &other) + { + if (this == &other) + return *this; + + obj::pool_base pop = get_pool_base(); + obj::flat_transaction::run(pop, [&] { + using pocca_t = typename node_allocator_traits:: + propagate_on_container_copy_assignment; + clear(); + allocator_copy_assignment(_node_allocator, + other._node_allocator, + pocca_t()); + _compare = other._compare; + _rnd_generator = other._rnd_generator; + + internal_copy(other); + }); + return *this; + } + + /** + * Move assignment operator. Replaces the contents with those of other + * using move semantics (i.e. the data in other is moved from other into + * this container). other is in a valid but unspecified state + * afterwards. If + * std::allocator_traits::propagate_on_container_move_assignment::value + * is true, the target allocator is replaced by a copy of the source + * allocator. If it is false and the source and the target allocators do + * not compare equal, the target cannot take ownership of the source + * memory and must move-assign each element individually, allocating + * additional memory using its own allocator as needed. In any case, all + * elements originally present in *this are either destroyed or replaced + * by elementwise move-assignment. + * + * @throw pmem::transaction_alloc_error when allocating new memory + * failed. + * @throw pmem::transaction_free_error when freeing old existing + * elements failed. + * @throw rethrows constructor exception. 
+ */ + swmr_skip_list & + operator=(swmr_skip_list &&other) + { + if (this == &other) + return *this; + + obj::pool_base pop = get_pool_base(); + obj::flat_transaction::run(pop, [&] { + using pocma_t = typename node_allocator_traits:: + propagate_on_container_move_assignment; + clear(); + if (pocma_t::value || + _node_allocator == other._node_allocator) { + delete_dummy_head(); + allocator_move_assignment(_node_allocator, + other._node_allocator, + pocma_t()); + _compare = other._compare; + _rnd_generator = other._rnd_generator; + internal_move(std::move(other)); + } else { + internal_copy( + std::make_move_iterator(other.begin()), + std::make_move_iterator(other.end())); + } + }); + return *this; + } + + /** + * Replaces the contents with those identified by initializer list il. + * + * @param[in] il initializer list to use as data source + * + * @throw pmem::transaction_alloc_error when allocating new memory + * failed. + * @throw pmem::transaction_free_error when freeing old existing + * elements failed. + * @throw rethrows constructor exception. + */ + swmr_skip_list & + operator=(std::initializer_list il) + { + obj::pool_base pop = get_pool_base(); + obj::flat_transaction::run(pop, [&] { + clear(); + for (auto it = il.begin(); it != il.end(); ++it) + internal_unsafe_emplace(*it); + }); + return *this; + } + + /** + * Inserts value in a thread-safe way. No iterators or references are + * invalidated. + * + * @param[in] value element value to insert. + * + * @return a pair consisting of an iterator to the inserted element (or + * to the element that prevented the insertion) and a bool denoting + * whether the insertion took place. + * + * @throw pmem::transaction_error when snapshotting failed. + * @throw pmem::transaction_alloc_error when allocating new memory + * failed. + * @throw pmem::transaction_scope_error if called inside transaction. + * @throw rethrows constructor exception. 
+ */ + std::pair + insert(const value_type &value) + { + return internal_insert(value.first, value); + } + + /** + * Inserts value. No iterators or references are invalidated. + * This overload is equivalent to emplace(std::forward
<P>
(value)) and + * only participates in overload resolution if + * std::is_constructible::value == true. + * + * @param[in] value element value to insert. + * + * @return a pair consisting of an iterator to the inserted element (or + * to the element that prevented the insertion) and a bool denoting + * whether the insertion took place. + * + * @throw pmem::transaction_error when snapshotting failed. + * @throw pmem::transaction_alloc_error when allocating new memory + * failed. + * @throw pmem::transaction_scope_error if called inside transaction. + * @throw rethrows constructor exception. + */ + template ::value>::type> + std::pair + insert(P &&value) + { + return emplace(std::forward
<P>
(value)); + } + + /** + * Inserts value using move semantic. No iterators or references are + * invalidated. + * + * @param[in] value element value to insert. + * + * @return a pair consisting of an iterator to the inserted element (or + * to the element that prevented the insertion) and a bool denoting + * whether the insertion took place. + * + * @throw pmem::transaction_error when snapshotting failed. + * @throw pmem::transaction_alloc_error when allocating new memory + * failed. + * @throw pmem::transaction_scope_error if called inside transaction. + * @throw rethrows constructor exception. + */ + std::pair + insert(value_type &&value) + { + return internal_insert(value.first, std::move(value)); + } + + /** + * Inserts value in the position as close as possible, just prior to + * hint. No iterators or references are invalidated. + * + * @param[in] hint iterator to the position before which the new element + * will be inserted. + * @param[in] value element value to insert. + * + * @return an iterator to the inserted element, or to the element that + * prevented the insertion. + * + * @throw pmem::transaction_error when snapshotting failed. + * @throw pmem::transaction_alloc_error when allocating new memory + * failed. + * @throw pmem::transaction_scope_error if called inside transaction. + * @throw rethrows constructor exception. + */ + iterator + insert(const_iterator hint, const_reference value) + { + /* Ignore hint */ + return insert(value).first; + } + + /** + * Inserts value in the position as close as possible, just prior to + * hint. No iterators or references are invalidated. + * This overload is equivalent to emplace_hint(hint, + * std::forward
<P>
(value)) and only participates in overload resolution + * if std::is_constructible::value == true. + * + * @param[in] hint iterator to the position before which the new element + * will be inserted. + * @param[in] value element value to insert. + * + * @return an iterator to the inserted element, or to the element that + * prevented the insertion. + * + * @throw pmem::transaction_error when snapshotting failed. + * @throw pmem::transaction_alloc_error when allocating new memory + * failed. + * @throw pmem::transaction_scope_error if called inside transaction. + * @throw rethrows constructor exception. + */ + template ::value>::type> + iterator + insert(const_iterator hint, P &&value) + { + return emplace_hint(hint, std::forward
<P>
(value)); + } + + /** + * Inserts elements from range [first, last). If multiple elements in + * the range have keys that compare equivalent, the first one is + * inserted. + * + * @param[in] first first iterator of inserted range. + * @param[in] last last iterator of inserted range. + * + * @throw pmem::transaction_error when snapshotting failed. + * @throw pmem::transaction_alloc_error when allocating new memory + * failed. + * @throw pmem::transaction_scope_error if called inside transaction. + * @throw rethrows constructor exception. + */ + template + void + insert(InputIterator first, InputIterator last) + { + for (InputIterator it = first; it != last; ++it) + insert(*it); + } + + /** + * Inserts elements from initializer list ilist. If multiple elements in + * the range have keys that compare equivalent, the first one is + * inserted. + * + * @param[in] ilist first initializer list to insert the values from. + * + * @throw pmem::transaction_error when snapshotting failed. + * @throw pmem::transaction_alloc_error when allocating new memory + * failed. + * @throw pmem::transaction_scope_error if called inside transaction. + * @throw rethrows constructor exception. + */ + void + insert(std::initializer_list ilist) + { + insert(ilist.begin(), ilist.end()); + } + + /** + * Inserts a new element into the container constructed in-place with + * the given args if there is no element with the key in the container. + * + * Careful use of emplace allows the new element to be constructed while + * avoiding unnecessary copy or move operations. The constructor of the + * new element (i.e. std::pair) is called with exactly the + * same arguments as supplied to emplace, forwarded via + * std::forward(args).... The element may be constructed even if + * there already is an element with the key in the container, in which + * case the newly constructed element will be destroyed immediately. + * + * No iterators or references are invalidated. 
+ * + * @param[in] args arguments to forward to the constructor of the + * element + * + * @return a pair consisting of an iterator to the inserted element (or + * to the element that prevented the insertion) and a bool denoting + * whether the insertion took place. + * + * @throw pmem::transaction_error when snapshotting failed. + * @throw pmem::transaction_alloc_error when allocating new memory + * failed. + * @throw pmem::transaction_scope_error if called inside transaction. + * @throw rethrows constructor exception. + */ + template + std::pair + emplace(Args &&... args) + { + return internal_emplace(std::forward(args)...); + } + + /** + * Inserts a new element to the container as close as possible to the + * position just before hint. The element is constructed in-place, i.e. + * no copy or move operations are performed. + * + * The constructor of the element type (value_type, that is, + * std::pair) is called with exactly the same arguments as + * supplied to the function, forwarded with std::forward(args)... + * + * No iterators or references are invalidated. + * + * @param[in] hint iterator to the position before which the new element + * will be inserted. + * @param[in] args arguments to forward to the constructor of the + * element. + * + * @return Returns an iterator to the newly inserted element. + * + * If the insertion failed because the element already exists, returns + * an iterator to the already existing element with the equivalent key. + * + * @return an iterator to the inserted element, or to the element that + * prevented the insertion. + * + * @throw pmem::transaction_error when snapshotting failed. + * @throw pmem::transaction_alloc_error when allocating new memory + * failed. + * @throw pmem::transaction_scope_error if called inside transaction. + * @throw rethrows constructor exception. + */ + template + iterator + emplace_hint(const_iterator hint, Args &&... 
args) + { + /* Ignore hint */ + return emplace(std::forward(args)...).first; + } + + /** + * If a key equivalent to k already exists in the container, does + * nothing. Otherwise, behaves like emplace except that the element is + * constructed as value_type(std::piecewise_construct, + * std::forward_as_tuple(k), + * std::forward_as_tuple(std::forward(args)...)) + * + * No iterators or references are invalidated. + * + * @param[in] k the key used both to look up and to insert if not found. + * @param[in] args arguments to forward to the constructor of the + * element. + * + * @return a pair consisting of an iterator to the inserted element (or + * to the element that prevented the insertion) and a bool denoting + * whether the insertion took place. + * + * @throw pmem::transaction_error when snapshotting failed. + * @throw pmem::transaction_alloc_error when allocating new memory + * failed. + * @throw pmem::transaction_scope_error if called inside transaction. + * @throw rethrows constructor exception. + */ + template + std::pair + try_emplace(const key_type &k, Args &&... args) + { + return internal_try_emplace(k, std::forward(args)...); + } + + /** + * If a key equivalent to k already exists in the container, does + * nothing. Otherwise, behaves like emplace except that the element is + * constructed as value_type(std::piecewise_construct, + * std::forward_as_tuple(std::move(k)), + * std::forward_as_tuple(std::forward(args)...)). + * + * No iterators or references are invalidated. + * + * @param[in] k the key used both to look up and to insert if not found. + * @param[in] args arguments to forward to the constructor of the + * element. + * + * @return a pair consisting of an iterator to the inserted element (or + * to the element that prevented the insertion) and a bool denoting + * whether the insertion took place. + * + * @throw pmem::transaction_error when snapshotting failed. + * @throw pmem::transaction_alloc_error when allocating new memory + * failed. 
+ * @throw pmem::transaction_scope_error if called inside transaction. + * @throw rethrows constructor exception. + */ + template + std::pair + try_emplace(key_type &&k, Args &&... args) + { + return internal_try_emplace(std::move(k), + std::forward(args)...); + } + + /** + * If a key equivalent to k already exists in the container, does + * nothing. Otherwise, behaves like emplace except that the element is + * constructed as value_type(std::piecewise_construct, + * std::forward_as_tuple(std::move(k)), + * std::forward_as_tuple(std::forward(args)...)). + * This overload only participates in overload resolution if the + * qualified-id Compare::is_transparent is valid and denotes a type and + * std::is_constructible::value == true . It allows calling + * this function without constructing an instance of Key. + * + * No iterators or references are invalidated. + * + * @param[in] k the key used both to look up and to insert if not found. + * @param[in] args arguments to forward to the constructor of the + * element. + * + * @return a pair consisting of an iterator to the inserted element (or + * to the element that prevented the insertion) and a bool denoting + * whether the insertion took place. + * + * @throw pmem::transaction_error when snapshotting failed. + * @throw pmem::transaction_alloc_error when allocating new memory + * failed. + * @throw rethrows constructor exception. + */ + template + typename std::enable_if< + has_is_transparent::value && + std::is_constructible::value, + std::pair>::type + try_emplace(K &&k, Args &&... args) + { + return internal_try_emplace(std::forward(k), + std::forward(args)...); + } + + /** + * Removes the element at pos from the container. + * References and iterators to the erased elements are invalidated. + * Other references and iterators are not affected. + * + * @pre The iterator pos must be valid and dereferenceable. 
Thus the + * end() iterator (which is valid, but is not dereferenceable) cannot be + * used as a value for pos. + * + * @param[in] pos iterator to the element to remove. + * + * @return iterator following the removed element. + * + * @throw pmem::transaction_error when snapshotting failed. + * @throw rethrows destructor exception. + */ + iterator + unsafe_erase(iterator pos) + { + check_outside_tx(); + auto &size_diff = tls_data.local().size_diff; + return internal_erase(pos, size_diff); + } + + /** + * Removes the element at pos from the container. + * References and iterators to the erased elements are invalidated. + * Other references and iterators are not affected. + * + * @pre The iterator pos must be valid and dereferenceable. Thus the + * end() iterator (which is valid, but is not dereferenceable) cannot be + * used as a value for pos. + * + * @param[in] pos iterator to the element to remove. + * + * @return iterator following the removed element. + * + * @throw pmem::transaction_error when snapshotting failed. + * @throw rethrows destructor exception. + */ + iterator + unsafe_erase(const_iterator pos) + { + return unsafe_erase(get_iterator(pos)); + } + + /** + * Removes the elements in the range [first; last), which must be a + * valid range in *this. + * References and iterators to the erased elements are invalidated. + * Other references and iterators are not affected. + * + * @param[in] first first iterator in the range of elements to remove. + * @param[in] last last iterator in the range of elements to remove. + * + * @return iterator following the last removed element. + * + * @throw pmem::transaction_error when snapshotting failed. + * @throw rethrows destructor exception. 
+ */ + iterator + unsafe_erase(const_iterator first, const_iterator last) + { + check_outside_tx(); + obj::pool_base pop = get_pool_base(); + auto &size_diff = tls_data.local().size_diff; + + obj::flat_transaction::run(pop, [&] { + while (first != last) { + first = internal_erase(first, size_diff); + } + }); + + return get_iterator(first); + } + + /** + * Removes the element (if one exists) with the key equivalent to key. + * References and iterators to the erased elements are invalidated. + * Other references and iterators are not affected. + * + * @param[in] key key value of the elements to remove. + * + * @return Number of elements removed. + * + * @throw pmem::transaction_error when snapshotting failed. + * @throw rethrows destructor exception. + */ + size_type + unsafe_erase(const key_type &key) + { + std::pair range = equal_range(key); + size_type sz = static_cast( + std::distance(range.first, range.second)); + unsafe_erase(range.first, range.second); + return sz; + } + + /** + * Removes the element (if one exists) with the key equivalent to key. + * References and iterators to the erased elements are invalidated. + * Other references and iterators are not affected. + * This overload only participates in overload resolution if the + * qualified-id Compare::is_transparent is valid and denotes a type and + * std::is_convertible::value != true && + * std::is_convertible::value != true. + * It allows calling this function without constructing an instance of + * Key. + * + * @param[in] key key value of the elements to remove. + * + * @return Number of elements removed. + * + * @throw pmem::transaction_error when snapshotting failed. + * @throw rethrows destructor exception. 
+ */ + template < + typename K, + typename = typename std::enable_if< + has_is_transparent::value && + !std::is_convertible::value && + !std::is_convertible::value, + K>::type> + size_type + unsafe_erase(const K &key) + { + std::pair range = equal_range(key); + size_type sz = static_cast( + std::distance(range.first, range.second)); + unsafe_erase(range.first, range.second); + return sz; + } + + /** + * Returns an iterator pointing to the first element that is not less + * than (i.e. greater or equal to) key. + * + * @param[in] key key value to compare the elements to. + * + * @return Iterator pointing to the first element that is not less than + * key. If no such element is found, a past-the-end iterator is + * returned. + */ + iterator + lower_bound(const key_type &key) + { + return internal_get_bound(key, _compare); + } + + /** + * Returns an iterator pointing to the first element that is not less + * than (i.e. greater or equal to) key. + * + * @param[in] key key value to compare the elements to. + * + * @return Iterator pointing to the first element that is not less than + * key. If no such element is found, a past-the-end iterator is + * returned. + */ + const_iterator + lower_bound(const key_type &key) const + { + return internal_get_bound(key, _compare); + } + + /** + * Returns an iterator pointing to the first element that compares not + * less (i.e. greater or equal) to the value x. This overload only + * participates in overload resolution if the qualified-id + * Compare::is_transparent is valid and denotes a type. They allow + * calling this function without constructing an instance of Key. + * + * @param[in] x alternative value that can be compared to Key. + * + * @return Iterator pointing to the first element that is not less than + * key. If no such element is found, a past-the-end iterator is + * returned. 
+ */ + template ::value, K>::type> + iterator + lower_bound(const K &x) + { + return internal_get_bound(x, _compare); + } + + /** + * Returns an iterator pointing to the first element that compares not + * less (i.e. greater or equal) to the value x. This overload only + * participates in overload resolution if the qualified-id + * Compare::is_transparent is valid and denotes a type. They allow + * calling this function without constructing an instance of Key. + * + * @param[in] x alternative value that can be compared to Key. + * + * @return Iterator pointing to the first element that is not less than + * key. If no such element is found, a past-the-end iterator is + * returned. + */ + template ::value, K>::type> + const_iterator + lower_bound(const K &x) const + { + return internal_get_bound(x, _compare); + } + + /** + * Returns an iterator pointing to the first element that is not less + * than (i.e. greater or equal to) key. Equivalent of lower_bound. + * + * @param[in] key key value to compare the elements to. + * + * @return Iterator pointing to the first element that is not less than + * key. If no such element is found, a past-the-end iterator is + * returned. + */ + iterator + find_higher_eq(const key_type &key) + { + return internal_get_bound(key, _compare); + } + + /** + * Returns an iterator pointing to the first element that is not less + * than (i.e. greater or equal to) key. Equivalent of lower_bound. + * + * @param[in] key key value to compare the elements to. + * + * @return Iterator pointing to the first element that is not less than + * key. If no such element is found, a past-the-end iterator is + * returned. + */ + const_iterator + find_higher_eq(const key_type &key) const + { + return internal_get_bound(key, _compare); + } + + /** + * Returns an iterator pointing to the first element that compares not + * less (i.e. greater or equal) to the value x. 
This overload only + * participates in overload resolution if the qualified-id + * Compare::is_transparent is valid and denotes a type. They allow + * calling this function without constructing an instance of Key. + * Equivalent of lower_bound. + * + * @param[in] x alternative value that can be compared to Key. + * + * @return Iterator pointing to the first element that is not less than + * key. If no such element is found, a past-the-end iterator is + * returned. + */ + template ::value, K>::type> + iterator + find_higher_eq(const K &x) + { + return internal_get_bound(x, _compare); + } + + /** + * Returns an iterator pointing to the first element that compares not + * less (i.e. greater or equal) to the value x. This overload only + * participates in overload resolution if the qualified-id + * Compare::is_transparent is valid and denotes a type. They allow + * calling this function without constructing an instance of Key. + * Equivalent of lower_bound. + * + * @param[in] x alternative value that can be compared to Key. + * + * @return Iterator pointing to the first element that is not less than + * key. If no such element is found, a past-the-end iterator is + * returned. + */ + template ::value, K>::type> + const_iterator + find_higher_eq(const K &x) const + { + return internal_get_bound(x, _compare); + } + + /** + * Returns an iterator pointing to the first element that is greater + * than key. + * + * @param[in] key key value to compare the elements to. + * + * @return Iterator pointing to the first element that is greater than + * key. If no such element is found, a past-the-end iterator is + * returned. + */ + iterator + upper_bound(const key_type &key) + { + return internal_get_bound(key, not_greater_compare(_compare)); + } + + /** + * Returns an iterator pointing to the first element that is greater + * than key. + * + * @param[in] key key value to compare the elements to. + * + * @return Iterator pointing to the first element that is greater than + * key. 
If no such element is found, a past-the-end iterator is + * returned. + */ + const_iterator + upper_bound(const key_type &key) const + { + return internal_get_bound(key, not_greater_compare(_compare)); + } + + /** + * Returns an iterator pointing to the first element that compares + * greater to the value x. This overload only participates in overload + * resolution if the qualified-id Compare::is_transparent is valid and + * denotes a type. They allow calling this function without constructing + * an instance of Key. + * + * @param[in] x alternative value that can be compared to Key. + * + * @return Iterator pointing to the first element that is greater than + * key. If no such element is found, a past-the-end iterator is + * returned. + */ + template ::value, K>::type> + iterator + upper_bound(const K &x) + { + return internal_get_bound(x, not_greater_compare(_compare)); + } + + /** + * Returns an iterator pointing to the first element that compares + * greater to the value x. This overload only participates in overload + * resolution if the qualified-id Compare::is_transparent is valid and + * denotes a type. They allow calling this function without constructing + * an instance of Key. + * + * @param[in] x alternative value that can be compared to Key. + * + * @return Iterator pointing to the first element that is greater than + * key. If no such element is found, a past-the-end iterator is + * returned. + */ + template ::value, K>::type> + const_iterator + upper_bound(const K &x) const + { + return internal_get_bound(x, not_greater_compare(_compare)); + } + + /** + * Returns an iterator pointing to the first element that is greater + * than key. Equivalent of upper_bound. + * + * @param[in] key key value to compare the elements to. + * + * @return Iterator pointing to the first element that is greater than + * key. If no such element is found, a past-the-end iterator is + * returned. 
+ */ + iterator + find_higher(const key_type &key) + { + return internal_get_bound(key, not_greater_compare(_compare)); + } + + /** + * Returns an iterator pointing to the first element that is greater + * than key. Equivalent of upper_bound. + * + * @param[in] key key value to compare the elements to. + * + * @return Iterator pointing to the first element that is greater than + * key. If no such element is found, a past-the-end iterator is + * returned. + */ + const_iterator + find_higher(const key_type &key) const + { + return internal_get_bound(key, not_greater_compare(_compare)); + } + + /** + * Returns an iterator pointing to the first element that compares + * greater to the value x. This overload only participates in overload + * resolution if the qualified-id Compare::is_transparent is valid and + * denotes a type. They allow calling this function without constructing + * an instance of Key. Equivalent of upper_bound. + * + * @param[in] x alternative value that can be compared to Key. + * + * @return Iterator pointing to the first element that is greater than + * key. If no such element is found, a past-the-end iterator is + * returned. + */ + template ::value, K>::type> + iterator + find_higher(const K &x) + { + return internal_get_bound(x, not_greater_compare(_compare)); + } + + /** + * Returns an iterator pointing to the first element that compares + * greater to the value x. This overload only participates in overload + * resolution if the qualified-id Compare::is_transparent is valid and + * denotes a type. They allow calling this function without constructing + * an instance of Key. Equivalent of upper_bound. + * + * @param[in] x alternative value that can be compared to Key. + * + * @return Iterator pointing to the first element that is greater than + * key. If no such element is found, a past-the-end iterator is + * returned. 
+ */ + template ::value, K>::type> + const_iterator + find_higher(const K &x) const + { + return internal_get_bound(x, not_greater_compare(_compare)); + } + + /** + * Returns an iterator pointing to the biggest element that is less + * than key. + * + * @param[in] key key value to compare the elements to. + * + * @return Iterator pointing to the biggest element that is less than + * key. If no such element is found, a past-the-end iterator is + * returned. + */ + iterator + find_lower(const key_type &key) + { + auto it = internal_get_biggest_less_than(key, _compare); + return iterator( + const_cast(it.node)); + } + + /** + * Returns a const iterator pointing to the biggest element that is less + * than key. + * + * @param[in] key key value to compare the elements to. + * + * @return Const iterator pointing to the biggest element that is less + * than key. If no such element is found, a past-the-end iterator is + * returned. + */ + const_iterator + find_lower(const key_type &key) const + { + return internal_get_biggest_less_than(key, _compare); + } + + /** + * Returns an iterator pointing to the biggest element that is less + * than key. This overload only participates in overload + * resolution if the qualified-id Compare::is_transparent is valid and + * denotes a type. They allow calling this function without constructing + * an instance of Key. + * + * @param[in] key alternative value that can be compared to Key. + * + * @return Iterator pointing to the biggest element that is less than + * key. If no such element is found, a past-the-end iterator is + * returned. + */ + template ::value, K>::type> + iterator + find_lower(const K &key) + { + auto it = internal_get_biggest_less_than(key, _compare); + return iterator( + const_cast(it.node)); + } + + /** + * Returns a const iterator pointing to the biggest element that is less + * than key. 
This overload only participates in overload + * resolution if the qualified-id Compare::is_transparent is valid and + * denotes a type. They allow calling this function without constructing + * an instance of Key. + * + * @param[in] key alternative value that can be compared to Key. + * + * @return Const iterator pointing to the biggest element that is less + * than key. If no such element is found, a past-the-end iterator is + * returned. + */ + template ::value, K>::type> + const_iterator + find_lower(const K &key) const + { + return internal_get_biggest_less_than(key, _compare); + } + + /** + * Returns an iterator pointing to the biggest element that is less + * than or equal to key. + * + * @param[in] key key value to compare the elements to. + * + * @return Iterator pointing to the biggest element that is less than + * or equal to key. If no such element is found, a past-the-end iterator + * is returned. + */ + iterator + find_lower_eq(const key_type &key) + { + auto it = internal_get_biggest_less_than( + key, not_greater_compare(_compare)); + return iterator( + const_cast(it.node)); + } + + /** + * Returns a const iterator pointing to the biggest element that is less + * than or equal to key. + * + * @param[in] key key value to compare the elements to. + * + * @return Const iterator pointing to the biggest element that is less + * than or equal to key. If no such element is found, a past-the-end + * iterator is returned. + */ + const_iterator + find_lower_eq(const key_type &key) const + { + return internal_get_biggest_less_than( + key, not_greater_compare(_compare)); + } + + /** + * Returns an iterator pointing to the biggest element that is less + * than or equal to key. This overload only participates in overload + * resolution if the qualified-id Compare::is_transparent is valid and + * denotes a type. They allow calling this function without constructing + * an instance of Key. + * + * @param[in] key alternative value that can be compared to Key. 
+ * + * @return Iterator pointing to the biggest element that is less than or + * equal to key. If no such element is found, a past-the-end iterator is + * returned. + */ + template ::value, K>::type> + iterator + find_lower_eq(const K &key) + { + auto it = internal_get_biggest_less_than( + key, not_greater_compare(_compare)); + return iterator( + const_cast(it.node)); + } + + /** + * Returns a const iterator pointing to the biggest element that is less + * than or equal to key. This overload only participates in overload + * resolution if the qualified-id Compare::is_transparent is valid and + * denotes a type. They allow calling this function without constructing + * an instance of Key. + * + * @param[in] key alternative value that can be compared to Key. + * + * @return Const iterator pointing to the biggest element that is less + * than or equal to key. If no such element is found, a past-the-end + * iterator is returned. + */ + template ::value, K>::type> + const_iterator + find_lower_eq(const K &key) const + { + return internal_get_biggest_less_than( + key, not_greater_compare(_compare)); + } + + /** + * Finds an element with key equivalent to key. + * + * @param[in] key key value of the element to search for. + * + * @return Iterator to an element with key equivalent to key. If no such + * element is found, past-the-end iterator is returned. + */ + iterator + find(const key_type &key) + { + return internal_find(key); + } + + /** + * Finds an element with key equivalent to key. + * + * @param[in] key key value of the element to search for. + * + * @return Iterator to an element with key equivalent to key. If no such + * element is found, past-the-end iterator is returned. + */ + const_iterator + find(const key_type &key) const + { + return internal_find(key); + } + + /** + * Finds an element with key that compares equivalent to the value x. 
+ * This overload only participates in overload resolution if the + * qualified-id Compare::is_transparent is valid and denotes a type. It + * allows calling this function without constructing an instance of Key. + * + * @param[in] x a value of any type that can be transparently compared + * with a key. + * + * @return Iterator to an element with key equivalent to key. If no such + * element is found, past-the-end iterator is returned. + */ + template ::value, K>::type> + iterator + find(const K &x) + { + return internal_find(x); + } + + /** + * Finds an element with key that compares equivalent to the value x. + * This overload only participates in overload resolution if the + * qualified-id Compare::is_transparent is valid and denotes a type. It + * allows calling this function without constructing an instance of Key. + * + * @param[in] x a value of any type that can be transparently compared + * with a key. + * + * @return Iterator to an element with key equivalent to key. If no such + * element is found, past-the-end iterator is returned. + */ + template ::value, K>::type> + const_iterator + find(const K &x) const + { + return internal_find(x); + } + + /** + * Returns the number of elements with key that compares equivalent to + * the specified argument. + * + * @param[in] key key value of the element to count. + * + * @return Number of elements with key that compares equivalent to the + * specified argument. + */ + size_type + count(const key_type &key) const + { + return internal_count(key); + } + + /** + * Returns the number of elements with key that compares equivalent to + * the specified argument. This overload only participates in overload + * resolution if the qualified-id Compare::is_transparent is valid and + * denotes a type. They allow calling this function without constructing + * an instance of Key. + * + * @param[in] key alternative value to compare to the keys. 
+ * + * @return Number of elements with key that compares equivalent to the + * specified argument. + */ + template ::value, K>::type> + size_type + count(const K &key) const + { + return internal_count(key); + } + + /** + * Checks if there is an element with key equivalent to key in the + * container. + * + * @param[in] key key value of the element to search for. + * + * @return true if there is such an element, otherwise false. + */ + bool + contains(const key_type &key) const + { + return find(key) != end(); + } + + /** + * Checks if there is an element with key that compares equivalent to + * the value x. This overload only participates in overload resolution + * if the qualified-id Compare::is_transparent is valid and denotes a + * type. It allows calling this function without constructing an + * instance of Key. + * + * @param[in] x a value of any type that can be transparently compared + * with a key. + * + * @return true if there is such an element, otherwise false. + */ + template ::value, K>::type> + bool + contains(const K &x) const + { + return find(x) != end(); + } + + /** + * Erases all elements from the container transactionally. + * + * @post size() == 0 + * + * @throw pmem::transaction_error when snapshotting failed. + * @throw rethrows destructor exception. + */ + void + clear() + { + assert(dummy_head->height() > 0); + obj::pool_base pop = get_pool_base(); + + persistent_node_ptr current = dummy_head->next(0); + + obj::flat_transaction::run(pop, [&] { + while (current) { + assert(current->height() > 0); + persistent_node_ptr next = current->next(0); + delete_node(current); + current = next; + } + + node_ptr head = dummy_head.get(); + for (size_type i = 0; i < head->height(); ++i) { + head->set_next_tx(i, nullptr); + } + + on_init_size = 0; + tls_data.clear(); + obj::flat_transaction::snapshot((size_t *)&_size); + _size = 0; + }); + } + + /** + * Returns an iterator to the first element of the container. 
+ * If the map is empty, the returned iterator will be equal to end(). + * + * @return Iterator to the first element. + */ + iterator + begin() + { + return iterator(dummy_head.get()->next(0).get()); + } + + /** + * Returns an iterator to the first element of the container. + * If the map is empty, the returned iterator will be equal to end(). + * + * @return Iterator to the first element. + */ + const_iterator + begin() const + { + return const_iterator(dummy_head.get()->next(0).get()); + } + + /** + * Returns an iterator to the first element of the container. + * If the map is empty, the returned iterator will be equal to end(). + * + * @return Iterator to the first element. + */ + const_iterator + cbegin() const + { + return const_iterator(dummy_head.get()->next(0).get()); + } + + /** + * Returns an iterator to the element following the last element of the + * map. This element acts as a placeholder; attempting to access it + * results in undefined behavior. + * + * @return Iterator to the element following the last element. + */ + iterator + end() + { + return iterator(nullptr); + } + + /** + * Returns an iterator to the element following the last element of the + * map. This element acts as a placeholder; attempting to access it + * results in undefined behavior. + * + * @return Iterator to the element following the last element. + */ + const_iterator + end() const + { + return const_iterator(nullptr); + } + + /** + * Returns an iterator to the element following the last element of the + * map. This element acts as a placeholder; attempting to access it + * results in undefined behavior. + * + * @return Iterator to the element following the last element. + */ + const_iterator + cend() const + { + return const_iterator(nullptr); + } + + /** + * Returns the number of elements in the container, i.e. + * std::distance(begin(), end()). + * + * @return The number of elements in the container. 
+ */ + size_type + size() const + { + return _size.load(std::memory_order_relaxed); + } + + /** + * Returns the maximum number of elements the container is able to hold + * due to system or library implementation limitations, i.e. + * std::distance(begin(), end()) for the largest container. + * + * @return Maximum number of elements. + */ + size_type + max_size() const + { + return std::numeric_limits::max(); + } + + /** + * Checks if the container has no elements, i.e. whether begin() == + * end(). + * + * @return true if the container is empty, false otherwise. + */ + bool + empty() const + { + return 0 == size(); + } + + /** + * XXX: Implement get_allocator() interface. + * Related with: https://github.com/pmem/libpmemobj-cpp/issues/827 + */ + + /** + * Exchanges the contents of the container with those of other + * transactionally. Does not invoke any move, copy, or swap operations + * on individual elements. + * + * @throw pmem::transaction_error when snapshotting failed. + */ + void + swap(swmr_skip_list &other) + { + obj::pool_base pop = get_pool_base(); + obj::flat_transaction::run(pop, [&] { + using pocs_t = typename node_allocator_traits:: + propagate_on_container_swap; + allocator_swap(_node_allocator, other._node_allocator, + pocs_t()); + std::swap(_compare, other._compare); + std::swap(_rnd_generator, other._rnd_generator); + std::swap(dummy_head, other.dummy_head); + on_init_size.swap(other.on_init_size); + + obj::flat_transaction::snapshot((size_t *)&_size); + obj::flat_transaction::snapshot( + (size_t *)&(other._size)); + _size = other._size.exchange(_size, + std::memory_order_relaxed); + }); + } + + /** + * Returns a range containing all elements with the given key in the + * container. The range is defined by two iterators, one pointing to the + * first element that is not less than key and another pointing to the + * first element greater than key. 
Alternatively, the first iterator may + * be obtained with lower_bound(), and the second with upper_bound(). + * + * Compares the keys to key. + * + * @param[in] key key value to compare the elements to. + * + * @return std::pair containing a pair of iterators defining the wanted + * range: the first pointing to the first element that is not less than + * key and the second pointing to the first element greater than key. If + * there are no elements not less than key, past-the-end (see end()) + * iterator is returned as the first element. Similarly if there are no + * elements greater than key, past-the-end iterator is returned as the + * second element. + */ + std::pair + equal_range(const key_type &key) + { + return std::pair(lower_bound(key), + upper_bound(key)); + } + + /** + * Returns a range containing all elements with the given key in the + * container. The range is defined by two iterators, one pointing to the + * first element that is not less than key and another pointing to the + * first element greater than key. Alternatively, the first iterator may + * be obtained with lower_bound(), and the second with upper_bound(). + * + * Compares the keys to key. + * + * @param[in] key key value to compare the elements to. + * + * @return std::pair containing a pair of iterators defining the wanted + * range: the first pointing to the first element that is not less than + * key and the second pointing to the first element greater than key. If + * there are no elements not less than key, past-the-end (see end()) + * iterator is returned as the first element. Similarly if there are no + * elements greater than key, past-the-end iterator is returned as the + * second element. + */ + std::pair + equal_range(const key_type &key) const + { + return std::pair( + lower_bound(key), upper_bound(key)); + } + + /** + * Returns a range containing all elements with the given key in the + * container. 
The range is defined by two iterators, one pointing to the + * first element that is not less than key and another pointing to the + * first element greater than key. Alternatively, the first iterator may + * be obtained with lower_bound(), and the second with upper_bound(). + * + * Compares the keys to the value x. This overload only participates in + * overload resolution if the qualified-id Compare::is_transparent is + * valid and denotes a type. They allow calling this function without + * constructing an instance of Key. + * + * @param[in] x alternative value that can be compared to Key. + * + * @return std::pair containing a pair of iterators defining the wanted + * range: the first pointing to the first element that is not less than + * key and the second pointing to the first element greater than key. If + * there are no elements not less than key, past-the-end (see end()) + * iterator is returned as the first element. Similarly if there are no + * elements greater than key, past-the-end iterator is returned as the + * second element. + */ + template ::value, K>::type> + std::pair + equal_range(const K &x) + { + return std::pair(lower_bound(x), + upper_bound(x)); + } + + /** + * Returns a range containing all elements with the given key in the + * container. The range is defined by two iterators, one pointing to the + * first element that is not less than key and another pointing to the + * first element greater than key. Alternatively, the first iterator may + * be obtained with lower_bound(), and the second with upper_bound(). + * + * Compares the keys to the value x. This overload only participates in + * overload resolution if the qualified-id Compare::is_transparent is + * valid and denotes a type. They allow calling this function without + * constructing an instance of Key. + * + * @param[in] key alternative value that can be compared to Key. 
+ * + * @return std::pair containing a pair of iterators defining the wanted + * range: the first pointing to the first element that is not less than + * key and the second pointing to the first element greater than key. If + * there are no elements not less than key, past-the-end (see end()) + * iterator is returned as the first element. Similarly if there are no + * elements greater than key, past-the-end iterator is returned as the + * second element. + */ + template ::value, K>::type> + std::pair + equal_range(const K &key) const + { + return std::pair( + lower_bound(key), upper_bound(key)); + } + + /** + * Returns a const reference to the object that compares the keys. + * + * @return Const reference to the key comparison function object. + */ + const key_compare & + key_comp() const + { + return _compare; + } + + /** + * Returns a reference to the object that compares the keys. + * + * @return Reference to the key comparison function object. + */ + key_compare & + key_comp() + { + return _compare; + } + +private: + /* Status flags stored in insert_stage field */ + enum insert_stage_type : uint8_t { not_started = 0, in_progress = 1 }; + /* + * Structure of thread local data. + * Size should be 64 bytes. + */ + struct tls_entry_type { + persistent_node_ptr ptr; + obj::p size_diff; + obj::p insert_stage; + + char reserved[64 - sizeof(decltype(ptr)) - + sizeof(decltype(size_diff)) - + sizeof(decltype(insert_stage))]; + }; + static_assert(sizeof(tls_entry_type) == 64, + "The size of tls_entry_type should be 64 bytes."); + + /** + * Private helper function. Checks if current transaction stage is equal + * to TX_STAGE_WORK and throws an exception otherwise. + * + * @throw pmem::transaction_scope_error if current transaction stage is + * not equal to TX_STAGE_WORK. 
+ */ + void + check_tx_stage_work() const + { + if (pmemobj_tx_stage() != TX_STAGE_WORK) + throw pmem::transaction_scope_error( + "Function called out of transaction scope."); + } + + /* Helper method which throws an exception when called in a tx */ + static inline void + check_outside_tx() + { + if (pmemobj_tx_stage() != TX_STAGE_NONE) + throw pmem::transaction_scope_error( + "Function called inside transaction scope."); + } + + void + init() + { + if (pool_uuid == 0) + throw pmem::pool_error("Invalid pool handle."); + + _size = 0; + on_init_size = 0; + create_dummy_head(); + } + + void + internal_move(swmr_skip_list &&other) + { + assert(this->empty()); + assert(pmemobj_tx_stage() == TX_STAGE_WORK); + dummy_head = other.dummy_head; + other.dummy_head = nullptr; + other.create_dummy_head(); + + _size.store(other._size.load(std::memory_order_relaxed), + std::memory_order_relaxed); + on_init_size = other.on_init_size; + } + + static const_reference + get_val(const_node_ptr n) + { + assert(n); + return *(n->get()); + } + + static reference + get_val(node_ptr n) + { + assert(n); + return *(n->get()); + } + + static const key_type & + get_key(const_node_ptr n) + { + assert(n); + return traits_type::get_key(get_val(n)); + } + + template + iterator + internal_find(const K &key) + { + iterator it = lower_bound(key); + return (it == end() || _compare(key, traits_type::get_key(*it))) + ? end() + : it; + } + + template + const_iterator + internal_find(const K &key) const + { + const_iterator it = lower_bound(key); + return (it == end() || _compare(key, traits_type::get_key(*it))) + ? end() + : it; + } + + template + size_type + internal_count(const K &key) const + { + if (allow_multimapping) { + std::pair range = + equal_range(key); + return static_cast( + std::distance(range.first, range.second)); + } + return (find(key) == end()) ? 
size_type(0) : size_type(1);
+	}
+
+	/**
+	 * Finds position on the @arg level using @arg cmp
+	 * @param level - on which level search prev node
+	 * @param prev - pointer to the start node to search
+	 * @param key - key to search
+	 * @param cmp - callable object to compare two objects
+	 * (_compare member is default comparator)
+	 * @returns pointer to the node which does not satisfy the comparison
+	 * with @arg key
+	 */
+	template <typename K, typename pointer_type, typename comparator>
+	persistent_node_ptr
+	internal_find_position(size_type level, pointer_type &prev,
+			       const K &key, const comparator &cmp) const
+	{
+		assert(level < prev->height());
+		persistent_node_ptr next = prev->next(level);
+		pointer_type curr = next.get();
+
+		while (curr && cmp(get_key(curr), key)) {
+			prev = curr;
+			assert(level < prev->height());
+			next = prev->next(level);
+			curr = next.get();
+		}
+
+		return next;
+	}
+
+	/**
+	 * The method finds insert position for the given @arg key. It finds
+	 * successor and predecessor nodes on each level of the skip list.
+	 *
+	 * @param[out] prev_nodes array of pointers to predecessor nodes on each
+	 * level.
+	 * @param[out] next_nodes array of pointers to successor nodes on each
+	 * level.
+	 * @param[in] key inserted key.
+	 */
+	template <typename K>
+	void
+	find_insert_pos(prev_array_type &prev_nodes,
+			next_array_type &next_nodes, const K &key)
+	{
+		if (allow_multimapping) {
+			fill_prev_next_arrays(prev_nodes, next_nodes, key,
+					      not_greater_compare(_compare));
+		} else {
+			fill_prev_next_arrays(prev_nodes, next_nodes, key,
+					      _compare);
+		}
+	}
+
+	/**
+	 * The method finds successor and predecessor nodes on each level of the
+	 * skip list for the given @arg key.
+	 *
+	 * @param[out] prev_nodes array of pointers to predecessor nodes on each
+	 * level.
+	 * @param[out] next_nodes array of pointers to successor nodes on each
+	 * level.
+	 * @param[in] key inserted key.
+	 * @param[in] cmp comparator functor used for the search.
+ */ + template + void + fill_prev_next_arrays(prev_array_type &prev_nodes, + next_array_type &next_nodes, const K &key, + const comparator &cmp) + { + node_ptr prev = dummy_head.get(); + prev_nodes.fill(prev); + next_nodes.fill(nullptr); + + for (size_type h = prev->height(); h > 0; --h) { + persistent_node_ptr next = + internal_find_position(h - 1, prev, key, cmp); + prev_nodes[h - 1] = prev; + next_nodes[h - 1] = next; + } + } + + template + std::pair + internal_try_emplace(K &&key, Args &&... args) + { + return internal_insert( + key, std::piecewise_construct, + std::forward_as_tuple(std::forward(key)), + std::forward_as_tuple(std::forward(args)...)); + } + + template + std::pair + internal_emplace(Args &&... args) + { + check_outside_tx(); + tls_entry_type &tls_entry = tls_data.local(); + obj::pool_base pop = get_pool_base(); + + obj::flat_transaction::run(pop, [&] { + assert(tls_entry.ptr == nullptr); + tls_entry.ptr = + create_node(std::forward(args)...); + ++tls_entry.size_diff; + tls_entry.insert_stage = not_started; + }); + + node_ptr n = tls_entry.ptr.get(); + size_type height = n->height(); + + std::pair insert_result = internal_insert_node( + get_key(n), height, + [&](const next_array_type &next_nodes) + -> persistent_node_ptr & { + assert(tls_entry.insert_stage == not_started); + assert(tls_entry.ptr != nullptr); + + n->set_nexts(pop, next_nodes.data(), height); + + tls_entry.insert_stage = in_progress; + pop.persist(&(tls_entry.insert_stage), + sizeof(tls_entry.insert_stage)); + + return tls_entry.ptr; + }); + + if (!insert_result.second) { + assert(tls_entry.ptr != nullptr); + assert(tls_entry.insert_stage == not_started); + + obj::flat_transaction::run(pop, [&] { + --tls_entry.size_diff; + delete_node(tls_entry.ptr); + tls_entry.ptr = nullptr; + }); + } + + assert(tls_entry.ptr == nullptr); + return insert_result; + } + + /** + * Not thread-safe but can be called within a transaction. + * XXX: Need to optimize for single-threaded case. 
+ */ + template + std::pair + internal_unsafe_emplace(Args &&... args) + { + check_tx_stage_work(); + + persistent_node_ptr new_node = + create_node(std::forward(args)...); + + node_ptr n = new_node.get(); + size_type height = n->height(); + + std::pair insert_result = internal_insert_node( + get_key(n), height, + [&](const next_array_type &next_nodes) + -> persistent_node_ptr & { + assert(new_node != nullptr); + + n->set_nexts(next_nodes.data(), height); + + return new_node; + }); + + if (insert_result.second) { + ++on_init_size; + } else { + assert(new_node != nullptr); + + delete_node(new_node); + } + + return insert_result; + } + + /** + * Construct and insert new node to the skip list in a thread-safe way. + */ + template + std::pair + internal_insert(const K &key, Args &&... args) + { + check_outside_tx(); + tls_entry_type &tls_entry = tls_data.local(); + assert(tls_entry.ptr == nullptr); + + size_type height = random_level(); + + std::pair insert_result = internal_insert_node( + key, height, + [&](const next_array_type &next_nodes) + -> persistent_node_ptr & { + obj::pool_base pop = get_pool_base(); + + obj::flat_transaction::manual tx(pop); + tls_entry.ptr = create_node( + std::forward_as_tuple( + height, next_nodes.data()), + std::forward_as_tuple( + std::forward(args)...)); + + ++(tls_entry.size_diff); + tls_entry.insert_stage = in_progress; + obj::flat_transaction::commit(); + + assert(tls_entry.ptr != nullptr); + return tls_entry.ptr; + }); + + assert(tls_entry.ptr == nullptr); + + return insert_result; + } + + /** + * Try to insert new node to the skip list in a thread-safe way. 
+ */ + template + std::pair + internal_insert_node(const K &key, size_type height, + PrepareNode &&prepare_new_node) + { + prev_array_type prev_nodes; + next_array_type next_nodes; + node_ptr n = nullptr; + + do { + find_insert_pos(prev_nodes, next_nodes, key); + + node_ptr next = next_nodes[0].get(); + if (next && !allow_multimapping && + !_compare(key, get_key(next))) { + + return std::pair(iterator(next), + false); + } + + } while ((n = try_insert_node(prev_nodes, next_nodes, height, + std::forward( + prepare_new_node))) == + nullptr); + + assert(n); + return std::pair(iterator(n), true); + } + + /** + * Try to insert new node to the skip list. + * @returns pointer to the new node if it was inserted. Otherwise, + * returns nullptr. + */ + template + node_ptr + try_insert_node(prev_array_type &prev_nodes, + const next_array_type &next_nodes, size_type height, + PrepareNode &&prepare_new_node) + { + assert(dummy_head->height() >= height); + + lock_array locks; + if (!try_lock_nodes(height, prev_nodes, next_nodes, locks)) { + return nullptr; + } + + node_lock_type new_node_lock; + + persistent_node_ptr &new_node = prepare_new_node(next_nodes); + assert(new_node != nullptr); + node_ptr n = new_node.get(); + + /* + * We need to hold lock to the new node until changes + * are committed to persistent domain. Otherwise, the + * new node would be visible to concurrent inserts + * before it is persisted. + */ + new_node_lock = n->acquire(); + + obj::pool_base pop = get_pool_base(); + /* + * In the loop below we are linking a new node to all layers of + * the skip list. Transaction is not required because in case of + * failure the node is reachable via a pointer from persistent + * TLS. During recovery, we will complete the insert. It is also + * OK if concurrent readers will see not a fully-linked node + * because during recovery the insert procedure will be + * completed. 
+ */ + for (size_type level = 0; level < height; ++level) { + assert(prev_nodes[level]->height() > level); + assert(prev_nodes[level]->next(level) == + next_nodes[level]); + assert(prev_nodes[level]->next(level) == + n->next(level)); + prev_nodes[level]->set_next(pop, level, new_node); + } + +#ifndef NDEBUG + try_insert_node_finish_marker(); +#endif + + new_node = nullptr; + /* We need to persist the node pointer. Otherwise, on a restart, + * this pointer might be not null but the node can be already + * deleted. */ + pop.persist(&new_node, sizeof(new_node)); + + ++_size; +#if LIBPMEMOBJ_CPP_VG_PMEMCHECK_ENABLED + VALGRIND_PMC_DO_FLUSH(&_size, sizeof(_size)); +#endif + + assert(n); + return n; + } + + /** + * Used only inside asserts. + * Checks that prev_array is filled with correct values. + */ + bool + check_prev_array(const prev_array_type &prevs, size_type height) + { + for (size_type l = 1; l < height; ++l) { + if (prevs[l] == dummy_head.get()) { + continue; + } + + assert(prevs[l - 1] != dummy_head.get()); + assert(!_compare(get_key(prevs[l - 1]), + get_key(prevs[l]))); + } + + return true; + } + + bool + try_lock_nodes(size_type height, prev_array_type &prevs, + const next_array_type &nexts, lock_array &locks) + { + assert(check_prev_array(prevs, height)); + + for (size_type l = 0; l < height; ++l) { + if (l == 0 || prevs[l] != prevs[l - 1]) { + locks[l] = prevs[l]->acquire(); + } + + persistent_node_ptr next = prevs[l]->next(l); + if (next != nexts[l]) + /* Other thread inserted to this position and + * modified the pointer before we acquired the + * lock */ + return false; + } + + return true; + } + + /** + * Returns an iterator pointing to the first element from the list for + * which cmp(element, key) is false. + * + * @param[in] key key value to compare the elements to. + * @param[in] cmp comparator functor used for the search. + * + * @return Iterator pointing to the first element for which + * cmp(element, key) is false. 
If no such element is found, a
+	 * past-the-end iterator is returned.
+	 */
+	template <typename K, typename comparator>
+	const_iterator
+	internal_get_bound(const K &key, const comparator &cmp) const
+	{
+		const_node_ptr prev = dummy_head.get();
+		assert(prev->height() > 0);
+		persistent_node_ptr next = nullptr;
+
+		for (size_type h = prev->height(); h > 0; --h) {
+			next = internal_find_position(h - 1, prev, key, cmp);
+		}
+
+		return const_iterator(next.get());
+	}
+
+	/**
+	 * Returns an iterator pointing to the first element from the list for
+	 * which cmp(element, key) is false.
+	 *
+	 * @param[in] key key value to compare the elements to.
+	 * @param[in] cmp comparator functor used for the search.
+	 *
+	 * @return Iterator pointing to the first element for which
+	 * cmp(element, key) is false. If no such element is found, a
+	 * past-the-end iterator is returned.
+	 */
+	template <typename K, typename comparator>
+	iterator
+	internal_get_bound(const K &key, const comparator &cmp)
+	{
+		node_ptr prev = dummy_head.get();
+		assert(prev->height() > 0);
+		persistent_node_ptr next = nullptr;
+
+		for (size_type h = prev->height(); h > 0; --h) {
+			next = internal_find_position(h - 1, prev, key, cmp);
+		}
+
+		return iterator(next.get());
+	}
+
+	/**
+	 * Returns an iterator pointing to the last element from the list for
+	 * which cmp(element, key) is true.
+	 *
+	 * @param[in] key key value to compare the elements to.
+	 * @param[in] cmp comparator functor used for the search.
+	 *
+	 * @return Iterator pointing to the last element for which
+	 * cmp(element, key) is true. If no such element is found, a
+	 * past-the-end iterator is returned.
+ */ + template + const_iterator + internal_get_biggest_less_than(const K &key, + const comparator &cmp) const + { + const_node_ptr prev = dummy_head.get(); + assert(prev->height() > 0); + + for (size_type h = prev->height(); h > 0; --h) { + internal_find_position(h - 1, prev, key, cmp); + } + + if (prev == dummy_head.get()) + return end(); + + return const_iterator(prev); + } + + iterator + internal_erase(const_iterator pos, obj::p &size_diff) + { + assert(pos != end()); + + obj::pool_base pop = get_pool_base(); + + std::pair + extract_result(nullptr, nullptr); + + obj::flat_transaction::run(pop, [&] { + extract_result = internal_extract(pos); + + /* Make sure that node was extracted */ + assert(extract_result.first != nullptr); + delete_node(extract_result.first); + --size_diff; + obj::flat_transaction::snapshot((size_type *)&_size); + --_size; + }); + + return iterator(extract_result.second.get()); + } + + /** + * @returns a pointer to extracted node and a pointer to next node + */ + std::pair + internal_extract(const_iterator it) + { + assert(dummy_head->height() > 0); + assert(it != end()); + assert(pmemobj_tx_stage() == TX_STAGE_WORK); + + const key_type &key = traits_type::get_key(*it); + + prev_array_type prev_nodes; + next_array_type next_nodes; + + fill_prev_next_arrays(prev_nodes, next_nodes, key, _compare); + + node_ptr erase_node = next_nodes[0].get(); + assert(erase_node != nullptr); + + if (!_compare(key, get_key(erase_node))) { + /* XXX: this assertion will fail in case of multimap + * because we take the first node with the same key. + * Need to extend algorithm for mutimap. 
*/ + assert(erase_node == it.node); + return internal_extract_node(prev_nodes, next_nodes, + erase_node); + } + + return std::pair( + nullptr, nullptr); + } + + std::pair + internal_extract_node(const prev_array_type &prev_nodes, + const next_array_type &next_nodes, + node_ptr erase_node) + { + assert(pmemobj_tx_stage() == TX_STAGE_WORK); + assert(erase_node != nullptr); + for (size_type level = 0; level < erase_node->height(); + ++level) { + assert(prev_nodes[level]->height() > level); + assert(next_nodes[level].get() == erase_node); + prev_nodes[level]->set_next_tx(level, + erase_node->next(level)); + } + + return std::pair( + next_nodes[0], erase_node->next(0)); + } + + /** + * Get the persistent memory pool where hashmap resides. + * @returns pmem::obj::pool_base object. + */ + obj::pool_base + get_pool_base() const + { + PMEMobjpool *pop = pmemobj_pool_by_ptr(this); + return obj::pool_base(pop); + } + + void + internal_copy(const swmr_skip_list &other) + { + internal_copy(other.begin(), other.end()); + } + + template + void + internal_copy(Iterator first, Iterator last) + { + assert(pmemobj_tx_stage() == TX_STAGE_WORK); + + prev_array_type prev_nodes; + prev_nodes.fill(dummy_head.get()); + size_type sz = 0; + + for (; first != last; ++first, ++sz) { + persistent_node_ptr new_node = create_node(*first); + node_ptr n = new_node.get(); + for (size_type level = 0; level < n->height(); + ++level) { + prev_nodes[level]->set_next_tx(level, new_node); + prev_nodes[level] = n; + } + } + + on_init_size = sz; + /* + * As internal_swap can only be called from one thread, and + * there can be an outer transaction we must make sure that size + * change is transactional + */ + obj::flat_transaction::snapshot((size_type *)&_size); + _size = sz; + assert(std::is_sorted( + this->begin(), this->end(), + [&](const value_type &lhs, const value_type &rhs) { + return lhs.first < rhs.first; + })); + } + + /** Generate random level */ + size_type + random_level() + { + return 
_rnd_generator(); + } + + static size_type + calc_node_size(size_type height) + { + return sizeof(list_node_type) + + height * sizeof(typename list_node_type::node_pointer); + } + + /** Creates new node */ + template + persistent_node_ptr + create_node(Args &&... args) + { + size_type levels = random_level(); + + return create_node( + std::forward_as_tuple(levels), + std::forward_as_tuple(std::forward(args)...)); + } + + template + persistent_node_ptr + create_node(std::tuple &&node_args, + std::tuple &&value_args) + { + assert(pmemobj_tx_stage() == TX_STAGE_WORK); + + persistent_node_ptr node = creates_dummy_node( + std::forward>(node_args), + index_sequence_for{}); + + construct_value_type( + node, + std::forward>(value_args), + index_sequence_for{}); + + return node; + } + + template + void + construct_value_type(persistent_node_ptr node, Tuple &&args, + index_sequence) + { + node_ptr new_node = node.get(); + + node_allocator_traits::construct( + _node_allocator, new_node->get(), + std::get(std::forward(args))...); + } + + /** + * Creates dummy head. + * + * @pre Always called from ctor. + */ + void + create_dummy_head() + { + dummy_head = creates_dummy_node(MAX_LEVEL); + } + + template + persistent_node_ptr + creates_dummy_node(Tuple &&args, index_sequence) + { + return creates_dummy_node( + std::get(std::forward(args))...); + } + + /** + * Creates new node, value_type should be constructed separately. + * Each node object has different size which depends on number of layers + * the node is linked. In this method we calculate the size of the new + * node based on the node height. Then required amount of bytes are + * allocated and casted to the persistent_node_ptr. + * + * @pre Should be called inside transaction. + */ + template + persistent_node_ptr + creates_dummy_node(size_type height, Args &&... 
args) + { + assert(pmemobj_tx_stage() == TX_STAGE_WORK); + size_type sz = calc_node_size(height); + + persistent_node_ptr n = + node_allocator_traits::allocate(_node_allocator, sz) + .raw(); + + assert(n != nullptr); + + node_allocator_traits::construct(_node_allocator, n.get(), + height, + std::forward(args)...); + + return n; + } + + template + void + delete_node(persistent_node_ptr &node) + { + assert(pmemobj_tx_stage() == TX_STAGE_WORK); + node_ptr n = node.get(); + size_type sz = calc_node_size(n->height()); + + /* Destroy value */ + if (!is_dummy) + node_allocator_traits::destroy(_node_allocator, + n->get()); + /* Destroy node */ + node_allocator_traits::destroy(_node_allocator, n); + /* Deallocate memory */ + deallocate_node(node, sz); + node = nullptr; + } + + void + deallocate_node(persistent_node_ptr &node, size_type sz) + { + /* + * Each node object has different size which depends on number + * of layers the node is linked. Therefore, allocate/deallocate + * just a raw byte array. persistent_ptr is used as a + * pointer to raw array of bytes. + */ + obj::persistent_ptr tmp = + node.to_persistent_ptr().raw(); + node_allocator_traits::deallocate(_node_allocator, tmp, sz); + } + + void + delete_dummy_head() + { + assert(dummy_head != nullptr); + delete_node(dummy_head); + assert(dummy_head == nullptr); + } + + iterator + get_iterator(const_iterator it) + { + return iterator( + const_cast(it.node)); + } + + /** Process any information which was saved to tls and clears tls */ + void + tls_restore() + { + int64_t last_run_size = 0; + obj::pool_base pop = get_pool_base(); + + for (auto &tls_entry : tls_data) { + persistent_node_ptr &node = tls_entry.ptr; + auto &size_diff = tls_entry.size_diff; + if (node) { + /* + * We are completing inserts which were in + * progress before the crash because readers + * might saw incompleted inserts before the + * crash. 
We set the in_progress flag inside + * try_insert_node function when we locked the + * predecessors for the new node, therefore, + * only single node with the same key might have + * the in_progress status. + */ + if (tls_entry.insert_stage == in_progress) { + complete_insert(tls_entry); + } else { + obj::flat_transaction::run(pop, [&] { + --(tls_entry.size_diff); + delete_node(node); + node = nullptr; + }); + } + } + + assert(node == nullptr); + + last_run_size += size_diff; + } + + /* Make sure that on_init_size + last_run_size >= 0 */ + assert(last_run_size >= 0 || + on_init_size > + static_cast(std::abs(last_run_size))); + obj::flat_transaction::run(pop, [&] { + tls_data.clear(); + on_init_size += static_cast(last_run_size); + }); + _size = on_init_size; +#if LIBPMEMOBJ_CPP_VG_PMEMCHECK_ENABLED + VALGRIND_PMC_DO_FLUSH(&_size, sizeof(_size)); +#endif + } + + void + complete_insert(tls_entry_type &tls_entry) + { + persistent_node_ptr &node = tls_entry.ptr; + assert(node != nullptr); + assert(tls_entry.insert_stage == in_progress); + prev_array_type prev_nodes; + next_array_type next_nodes; + node_ptr n = node.get(); + const key_type &key = get_key(n); + size_type height = n->height(); + + fill_prev_next_arrays(prev_nodes, next_nodes, key, _compare); + obj::pool_base pop = get_pool_base(); + + /* Node was partially linked */ + for (size_type level = 0; level < height; ++level) { + assert(prev_nodes[level]->height() > level); + assert(prev_nodes[level]->next(level) == + next_nodes[level]); + + if (prev_nodes[level]->next(level) != node) { + /* Otherwise, node already linked on + * this layer */ + assert(n->next(level) == next_nodes[level]); + prev_nodes[level]->set_next(pop, level, node); + } + } + + node = nullptr; + pop.persist(&node, sizeof(node)); + } + + struct not_greater_compare { + const key_compare &my_less_compare; + + not_greater_compare(const key_compare &less_compare) + : my_less_compare(less_compare) + { + } + + template + bool + operator()(const K1 
&first, const K2 &second) const + { + return !my_less_compare(second, first); + } + }; + + const uint64_t pool_uuid = pmemobj_oid(this).pool_uuid_lo; + node_allocator_type _node_allocator; + key_compare _compare; + random_level_generator_type _rnd_generator; + persistent_node_ptr dummy_head; + + enumerable_thread_specific tls_data; + + std::atomic _size; + + /** + * This variable holds real size after the skip list is initialized. + * It holds real value of size only after initialization (before any + * insert/remove). + */ + obj::p on_init_size; +}; /* class swmr_skip_list */ + +template +class map_traits { +public: + static constexpr size_t max_level = MAX_LEVEL; + using random_generator_type = RND_GENERATOR; + using key_type = Key; + using mapped_type = Value; + using compare_type = KeyCompare; + using value_type = pair; + using reference = value_type &; + using const_reference = const value_type &; + using allocator_type = Allocator; + + /** + * pmem::detail::swmr_skip_list allows multimapping. If this flag + * is true we can store multiple entries with the same key. 
For + * concurrent_map it should be false; For concurrent_multimap it should + * be true; + */ + constexpr static bool allow_multimapping = AllowMultimapping; + + static const key_type & + get_key(const_reference val) + { + return val.first; + } +}; /* class map_traits */ + +} /* namespace detail */ +} /* namespace pmem */ + +#endif /* PMEMOBJ_SWMR_SKIP_LIST_IMPL_HPP */ diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index e3b238f87e..d21362dfb3 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -304,6 +304,9 @@ if(TEST_SELF_RELATIVE_POINTER) build_test(self_relative_ptr ptr/self_relative_ptr.cpp) add_test_generic(NAME self_relative_ptr TRACERS none memcheck pmemcheck) + build_test(pa_self_relative_ptr ptr/pa_self_relative_ptr.cpp) + add_test_generic(NAME pa_self_relative_ptr TRACERS none memcheck pmemcheck) + build_test(self_relative_ptr_arith ptr/self_relative_ptr_arith.cpp) add_test_generic(NAME self_relative_ptr_arith TRACERS none memcheck pmemcheck) diff --git a/tests/ptr/pa_self_relative_ptr.cpp b/tests/ptr/pa_self_relative_ptr.cpp new file mode 100644 index 0000000000..4cd0e39b67 --- /dev/null +++ b/tests/ptr/pa_self_relative_ptr.cpp @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright 2021, 4Paradigm Inc. 
*/ + +/* + * obj_cpp_ptr.c -- cpp bindings test + * + */ + +#include "ptr.hpp" +#include "unittest.hpp" + +#include +#include +#include +#include +#include +#include +#include + +#include + +#define LAYOUT "cpp" + +template +using pa_self_relative_ptr = nvobj::experimental::pa_self_relative_ptr; +using self_relative_ptr_base = nvobj::experimental::self_relative_ptr_base; + +namespace +{ + +using root = templated_root; + +/* + * test_offset -- test offset calculation within a hierarchy + */ +void +test_offset(nvobj::pool &pop) +{ + struct A { + uint64_t a; + }; + + struct B { + uint64_t b; + }; + + struct C : public A, public B { + uint64_t c; + }; + + try { + nvobj::transaction::run(pop, [] { + auto distance = + self_relative_ptr_base::distance_between; + + pa_self_relative_ptr cptr = nvobj::make_persistent(); + pa_self_relative_ptr bptr = cptr; + UT_ASSERT(distance(cptr, bptr) > 0); + UT_ASSERT(static_cast( + distance(cptr, bptr)) == sizeof(A)); + + pa_self_relative_ptr bptr2; + bptr2 = cptr; + UT_ASSERT(distance(cptr, bptr2) > 0); + UT_ASSERT(static_cast( + distance(cptr, bptr2)) == sizeof(A)); + + pa_self_relative_ptr bptr3 = + static_cast>(cptr); + UT_ASSERT(distance(cptr, bptr3) > 0); + UT_ASSERT(static_cast( + distance(cptr, bptr3)) == sizeof(A)); + + nvobj::delete_persistent(cptr); + }); + } catch (...) 
{ + UT_ASSERT(0); + } +} + +void +test_base_ptr_casting(nvobj::pool &pop) +{ + auto r = pop.root(); + + try { + nvobj::transaction::run(pop, [&] { + r->arr[0] = pa_self_relative_ptr{ + nvobj::make_persistent()}; + r->arr[1] = pa_self_relative_ptr{ + nvobj::make_persistent(TEST_INT)}; + r->arr[2] = nullptr; + + UT_ASSERTne(r->arr[0].to_void_pointer(), nullptr); + UT_ASSERTeq(*static_cast( + r->arr[1].to_void_pointer()), + TEST_INT); + UT_ASSERTeq(r->arr[2].to_void_pointer(), nullptr); + + pa_self_relative_ptr tmp0 = + static_cast(r->arr[0].to_void_pointer()); + pa_self_relative_ptr tmp1 = + static_cast(r->arr[1].to_void_pointer()); + pa_self_relative_ptr tmp2 = + static_cast(r->arr[2].to_void_pointer()); + nvobj::delete_persistent(tmp0); + nvobj::delete_persistent(tmp1); + nvobj::delete_persistent(tmp2); + }); + } catch (...) { + UT_ASSERT(0); + } +} + +void +test_base_ptr_assignment() +{ + int tmp; + + self_relative_ptr_base ptr1 = &tmp; + self_relative_ptr_base ptr2 = nullptr; + + ptr1 = ptr2; + + UT_ASSERT(ptr1.to_void_pointer() == nullptr); + UT_ASSERT(ptr2.to_void_pointer() == nullptr); +} + +} /* namespace */ + +static void +test(int argc, char *argv[]) +{ + if (argc != 2) + UT_FATAL("usage: %s file-name", argv[0]); + + const char *path = argv[1]; + + nvobj::pool pop; + + try { + pop = nvobj::pool::create(path, LAYOUT, PMEMOBJ_MIN_POOL, + S_IWUSR | S_IRUSR); + } catch (pmem::pool_error &pe) { + UT_FATAL("!pool::create: %s %s", pe.what(), path); + } + + test_root_pointers( + *pop.root()); + test_ptr_operators_null(); + test_ptr_transactional(pop); + test_ptr_array(pop); + test_offset(pop); + test_base_ptr_casting(pop); + test_base_ptr_assignment(); + + pop.close(); +} + +int +main(int argc, char *argv[]) +{ + return run_test([&] { test(argc, argv); }); +} From 5e942eb8e5b34573470397dda30a51370c227281 Mon Sep 17 00:00:00 2001 From: Yang Jun 01 Date: Thu, 22 Jul 2021 17:46:53 +0800 Subject: [PATCH 02/34] Add new pa_self_relative_ptr (persistent-aware 
self_relative_ptr) and swmr_map Add some related tests --- CMakeLists.txt | 1 + .../atomic_pa_self_relative_ptr.hpp | 270 +++++++ .../experimental/pa_self_relative_ptr.hpp | 274 +++++-- .../experimental/swmr_skip_list_impl.hpp | 137 ++-- tests/CMakeLists.txt | 29 + tests/ptr/ptr.hpp | 7 + tests/swmr_map/pmreorder.conf | 6 + tests/swmr_map/swmr_map.cpp | 252 +++++++ tests/swmr_map/swmr_map_insert_reopen.cpp | 133 ++++ tests/swmr_map/swmr_map_pmreorder_simple.cpp | 169 +++++ tests/swmr_map/swmr_map_singlethread.cpp | 701 ++++++++++++++++++ tests/swmr_map/swmr_map_tx.cpp | 170 +++++ 12 files changed, 2027 insertions(+), 122 deletions(-) create mode 100644 include/libpmemobj++/experimental/atomic_pa_self_relative_ptr.hpp create mode 100644 tests/swmr_map/pmreorder.conf create mode 100644 tests/swmr_map/swmr_map.cpp create mode 100644 tests/swmr_map/swmr_map_insert_reopen.cpp create mode 100644 tests/swmr_map/swmr_map_pmreorder_simple.cpp create mode 100644 tests/swmr_map/swmr_map_singlethread.cpp create mode 100644 tests/swmr_map/swmr_map_tx.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 60fd611dc7..639382110f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -97,6 +97,7 @@ option(TEST_SEGMENT_VECTOR_VECTOR_EXPSIZE "enable testing of pmem::obj::segment_ option(TEST_SEGMENT_VECTOR_VECTOR_FIXEDSIZE "enable testing of pmem::obj::segment_vector with vector as segment_vector_type and fixed_size_policy" ON) option(TEST_ENUMERABLE_THREAD_SPECIFIC "enable testing of pmem::obj::enumerable_thread_specific" ON) option(TEST_CONCURRENT_MAP "enable testing of pmem::obj::experimental::concurrent_map (depends on TEST_STRING)" ON) +option(TEST_SWMR_MAP "enable testing of pmem::obj::experimental::swmr_map (depends on TEST_STRING)" ON) option(TEST_SELF_RELATIVE_POINTER "enable testing of pmem::obj::experimental::self_relative_ptr" ON) option(TEST_RADIX_TREE "enable testing of pmem::obj::experimental::radix_tree" ON) option(TEST_MPSC_QUEUE "enable testing of 
pmem::obj::experimental::mpsc_queue" ON) diff --git a/include/libpmemobj++/experimental/atomic_pa_self_relative_ptr.hpp b/include/libpmemobj++/experimental/atomic_pa_self_relative_ptr.hpp new file mode 100644 index 0000000000..e42bb2fb51 --- /dev/null +++ b/include/libpmemobj++/experimental/atomic_pa_self_relative_ptr.hpp @@ -0,0 +1,270 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright 2020-2021, Intel Corporation */ + +#ifndef LIBPMEMOBJ_CPP_ATOMIC_SELF_RELATIVE_PTR_HPP +#define LIBPMEMOBJ_CPP_ATOMIC_SELF_RELATIVE_PTR_HPP + +#include +#include +#include +#include + +#include + +namespace std +{ +/** + * Atomic specialization for pa_self_relative_ptr + * + * Doesn't automatically add itself to the transaction. + * The user is responsible for persisting the data. + */ +template +struct atomic> { +private: + using ptr_type = pmem::detail::self_relative_ptr_base_impl< + std::atomic>; + using accessor = pmem::detail::self_relative_accessor< + std::atomic>; + +public: + using this_type = atomic; + using value_type = pmem::obj::experimental::pa_self_relative_ptr; + using difference_type = typename value_type::difference_type; + + /* + * Constructors + */ + + constexpr atomic() noexcept = default; + atomic(value_type value) : ptr() + { + store(value); + } + atomic(const atomic &) = delete; + + void + store(value_type desired, + std::memory_order order = std::memory_order_seq_cst) noexcept + { + auto offset = accessor::pointer_to_offset(ptr, desired.get()); + LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_BEFORE(order, &ptr); + accessor::get_offset(ptr).store((offset & desired.flush_set_mask()), order); + } + + value_type + load(std::memory_order order = std::memory_order_seq_cst) const noexcept + { + auto offset = accessor::get_offset(ptr).load(order); + LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_AFTER(order, &ptr); + auto pointer = accessor::offset_to_pointer(offset | ~(value_type::flush_set_mask(offset)), ptr); + return value_type{pointer, value_type::flush_needed(offset)}; + } + + 
value_type + exchange(value_type desired, + std::memory_order order = std::memory_order_seq_cst) noexcept + { + auto new_offset = + accessor::pointer_to_offset(ptr, desired.get()); + auto old_offset = + accessor::get_offset(ptr).exchange(new_offset & desired.flush_set_mask(), order); + return value_type{ + accessor::offset_to_pointer(old_offset | ~(value_type::flush_set_mask(old_offset)), ptr), value_type::flush_needed(old_offset)}; + } + + bool + compare_exchange_weak(value_type &expected, + value_type desired, + std::memory_order success, + std::memory_order failure) noexcept + { + auto expected_offset = + accessor::pointer_to_offset(ptr, expected.get()); + auto expected_actual = expected_offset & expected.flush_set_mask(); + auto desired_offset = + accessor::pointer_to_offset(ptr, desired.get()); + auto desired_actual = desired_offset & desired.flush_set_mask(); + bool result = accessor::get_offset(ptr).compare_exchange_weak( + expected_actual, desired_actual, success, failure); + if (!result) { + expected = value_type{accessor::offset_to_pointer( + expected_actual | ~(value_type::flush_set_mask(expected_actual)), ptr), + value_type::flush_needed(expected_actual)}; + } + return result; + } + + bool + compare_exchange_weak( + value_type &expected, value_type desired, + std::memory_order order = std::memory_order_seq_cst) noexcept + { + auto expected_offset = + accessor::pointer_to_offset(ptr, expected.get()); + auto expected_actual = expected_offset & expected.flush_set_mask(); + auto desired_offset = + accessor::pointer_to_offset(ptr, desired.get()); + auto desired_actual = desired_offset & desired.flush_set_mask(); + bool result = accessor::get_offset(ptr).compare_exchange_weak( + expected_actual, desired_actual, order); + if (!result) { + expected = value_type{accessor::offset_to_pointer( + expected_actual | ~(value_type::flush_set_mask(expected_actual)), ptr), + value_type::flush_needed(expected_actual)}; + } + return result; + } + + bool + 
compare_exchange_strong(value_type &expected, value_type desired, + std::memory_order success, + std::memory_order failure) noexcept + { + auto expected_offset = + accessor::pointer_to_offset(ptr, expected.get()); + auto expected_actual = expected_offset & expected.flush_set_mask(); + auto desired_offset = + accessor::pointer_to_offset(ptr, desired.get()); + auto desired_actual = desired_offset & desired.flush_set_mask(); + bool result = accessor::get_offset(ptr).compare_exchange_strong( + expected_actual, desired_actual, success, failure); + if (!result) { + expected = value_type{accessor::offset_to_pointer( + expected_actual | ~(value_type::flush_set_mask(expected_actual)), ptr), + value_type::flush_needed(expected_actual)}; + } + return result; + } + + bool + compare_exchange_strong( + value_type &expected, value_type desired, + std::memory_order order = std::memory_order_seq_cst) noexcept + { + auto expected_offset = + accessor::pointer_to_offset(ptr, expected.get()); + auto expected_actual = expected_offset & expected.flush_set_mask(); + auto desired_offset = + accessor::pointer_to_offset(ptr, desired.get()); + auto desired_actual = desired_offset & desired.flush_set_mask(); + bool result = accessor::get_offset(ptr).compare_exchange_strong( + expected_actual, desired_actual, order); + if (!result) { + expected = value_type{accessor::offset_to_pointer( + expected_actual | ~(value_type::flush_set_mask(expected_actual)), ptr), + value_type::flush_needed(expected_actual)}; + } + return result; + } + + value_type + fetch_add(difference_type val, + std::memory_order order = std::memory_order_seq_cst) noexcept + { + auto offset = accessor::get_offset(ptr).fetch_add( + val * static_cast(sizeof(T)), order); + return value_type{accessor::offset_to_pointer(offset | ~(value_type::flush_set_mask(offset)), ptr), value_type::flush_needed(offset)}; + } + + value_type + fetch_sub(difference_type val, + std::memory_order order = std::memory_order_seq_cst) noexcept + { + auto 
offset = accessor::get_offset(ptr).fetch_sub( + val * static_cast(sizeof(T)), order); + return value_type{accessor::offset_to_pointer(offset | ~(value_type::flush_set_mask(offset)), ptr), value_type::flush_needed(offset)}; + } + + bool + is_lock_free() const noexcept + { + return accessor::get_offset(ptr).is_lock_free(); + } + + /* + * Operators + */ + + operator value_type() const noexcept + { + return load(); + } + + atomic &operator=(const atomic &) = delete; + atomic &operator=(const atomic &) volatile = delete; + + value_type + operator=(value_type desired) noexcept + { + store(desired); + return desired; + } + + value_type + operator++() noexcept + { + return this->fetch_add(1) + 1; + } + + value_type + operator++(int) noexcept + { + return this->fetch_add(1); + } + + value_type + operator--() noexcept + { + return this->fetch_sub(1) - 1; + } + + value_type + operator--(int) noexcept + { + return this->fetch_sub(1); + } + + value_type + operator+=(difference_type diff) noexcept + { + return this->fetch_add(diff) + diff; + } + + value_type + operator-=(difference_type diff) noexcept + { + return this->fetch_sub(diff) - diff; + } + +private: + ptr_type ptr; +}; + +} /* namespace std */ + +namespace pmem +{ + +namespace detail +{ + +/** + * can_do_snapshot atomic specialization for pa_self_relative_ptr. Not thread safe. + * + * Use in a single threaded environment only. 
+ */ +template +struct can_do_snapshot>> { + using snapshot_type = obj::experimental::pa_self_relative_ptr; + static constexpr bool value = sizeof(std::atomic) == + sizeof(typename snapshot_type::offset_type); + static_assert(value, + "std::atomic should be the same size"); +}; + +} /* namespace detail */ + +} /* namespace pmem */ + +#endif diff --git a/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp b/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp index 73b5c019f8..7242cf533b 100644 --- a/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp +++ b/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp @@ -18,9 +18,9 @@ * Flush is needed if it is 0, not needed if it is 1. * */ -#define kFlushNeeded ~(1UL << 1) +#define kFlushNeeded ~(1L << 1) // flag &= kFlushNeeded, to indicate it needs flush -#define FlushNeeded(offset) (!((offset >> 1) & 1U)) +#define FlushNeeded(offset) (!((offset >> 1) & 1)) // return true if needs explicit flush, false otherwise. namespace pmem @@ -56,7 +56,7 @@ class pa_self_relative_ptr : public self_relative_ptr_base { get() const noexcept { return static_cast( - self_relative_ptr_base::to_void_pointer()); + this->to_void_pointer()); } private: @@ -105,28 +105,41 @@ class pa_self_relative_ptr : public self_relative_ptr_base { * Default constructor, equal the nullptr */ constexpr pa_self_relative_ptr() noexcept = default; + + /** + * Nullptr constructor + */ + constexpr pa_self_relative_ptr(std::nullptr_t) noexcept + : self_relative_ptr_base() + { +// std::cerr << "pa_self_relative_ptr 1" << std::endl; + } /** * Volatile pointer constructor. * * @param ptr volatile pointer, pointing to persistent memory. 
*/ - pa_self_relative_ptr(element_type *ptr, bool flushNeeded = true) noexcept + pa_self_relative_ptr(element_type *ptr, bool flushNeeded = false) noexcept : base_type(self_offset(ptr)) { - uintptr_t mask = (flushNeeded == true); +// std::cerr << "pa_self_relative_ptr 21: flushNeeded=" << flushNeeded << " : offset = " << std::hex << this->offset << std::endl; + intptr_t mask = (flushNeeded == true); --mask; this->offset &= (mask | kFlushNeeded); +// std::cerr << "pa_self_relative_ptr 22" << ": offset = " << std::hex << this->offset << std::endl; } /** * Constructor from persistent_ptr */ - pa_self_relative_ptr(persistent_ptr ptr, bool flushNeeded = true) noexcept + pa_self_relative_ptr(persistent_ptr ptr, bool flushNeeded = false) noexcept : base_type(self_offset(ptr.get())) { - uintptr_t mask = (flushNeeded == true); +// std::cerr << "pa_self_relative_ptr 31: flushNeeded=" << flushNeeded << " : offset = " << std::hex << this->offset << std::endl; + intptr_t mask = (flushNeeded == true); --mask; this->offset &= (mask | kFlushNeeded); +// std::cerr << "pa_self_relative_ptr 32" << ": offset = " << std::hex << this->offset << std::endl; } /** @@ -136,13 +149,15 @@ class pa_self_relative_ptr : public self_relative_ptr_base { * * @param oid C-style persistent pointer */ - pa_self_relative_ptr(PMEMoid oid, bool flushNeeded = true) noexcept + pa_self_relative_ptr(PMEMoid oid, bool flushNeeded = false) noexcept : base_type(self_offset( static_cast(pmemobj_direct(oid)))) { - uintptr_t mask = (flushNeeded == true); +// std::cerr << "pa_self_relative_ptr 41: flushNeeded=" << flushNeeded << " : offset = " << std::hex << this->offset << std::endl; + intptr_t mask = (flushNeeded == true); --mask; this->offset &= (mask | kFlushNeeded); +// std::cerr << "pa_self_relative_ptr 42" << ": offset = " << std::hex << this->offset << std::endl; } /** @@ -151,6 +166,8 @@ class pa_self_relative_ptr : public self_relative_ptr_base { pa_self_relative_ptr(const pa_self_relative_ptr &ptr) 
noexcept : base_type(ptr) { +// std::cerr << "pa_self_relative_ptr copy 1" << ": offset = " << std::hex << this->offset << std::endl; + this->offset &= ptr.flush_set_mask(); } /** @@ -169,12 +186,84 @@ class pa_self_relative_ptr : public self_relative_ptr_base { pa_self_relative_ptr(pa_self_relative_ptr const &r) noexcept : base_type(self_offset(static_cast(r.get()))) { +// std::cerr << "pa_self_relative_ptr copy 2" << ": offset = " << std::hex << this->offset << std::endl; + this->offset &= r.flush_set_mask(); } ~pa_self_relative_ptr() { verify_type(); } + /** + * Swaps two self_relative_ptr_base objects of the same type. + * + * @param[in,out] other the other self_relative_ptr to swap. + */ + void + swap(pa_self_relative_ptr &other) + { + if (this == &other) + return; + detail::conditional_add_to_tx(this); + detail::conditional_add_to_tx(&other); + auto first = this->to_byte_pointer(); + auto mask = this->flush_set_mask(); + auto second = other.to_byte_pointer(); + this->offset = pointer_to_offset(second); + this->offset &= other.flush_set_mask(); + other.offset = other.pointer_to_offset(first); + other.offset &= mask; + } + + /** + * Conversion to byte pointer + */ + byte_ptr_type + to_byte_pointer() const noexcept + { + return static_cast(this->to_void_pointer()); + } + + /** + * Conversion to void* + */ + void * + to_void_pointer() const noexcept + { +// std::cerr << "to_void_pointer 11: flushNeeded=" << flush_needed() << " : offset = " << std::hex << this->offset << std::endl; +// intptr_t mask = is_null() == true; +// --mask; +// std::cerr << "to_void_pointer 12: mask=" << std::hex << mask << std::endl; +// std::cerr << "to_void_pointer 13: ptr=" << std::hex << ((this->offset | ~kFlushNeeded) & mask) << std::endl; + return this->offset_to_pointer(this->offset);// | ~kFlushNeeded) & mask); + } + + /** + * Explicit conversion operator to void* + */ + explicit + operator void *() const noexcept + { + return to_void_pointer(); + } + + /** + * Explicit conversion 
operator to byte pointer + */ + explicit operator byte_ptr_type() const noexcept + { + return to_byte_pointer(); + } + + /** + * Byte distance between two relative pointers + */ + static difference_type + distance_between(const pa_self_relative_ptr &first, + const pa_self_relative_ptr &second) + { + return second.to_byte_pointer() - first.to_byte_pointer(); + } /** * Get the direct pointer. @@ -184,8 +273,7 @@ class pa_self_relative_ptr : public self_relative_ptr_base { inline element_type * get() const noexcept { - return static_cast(base_type::offset_to_pointer( - this->offset | ~kFlushNeeded)); + return static_cast(this->to_void_pointer()); } /** @@ -200,11 +288,36 @@ class pa_self_relative_ptr : public self_relative_ptr_base { /** * Check if flush is needed */ - bool + inline bool flush_needed() const { - return FlushNeeded(this->offset); + return (!is_null() && FlushNeeded(this->offset)); } + + static inline bool + flush_needed(offset_type offset) { + return ((offset != nullptr_offset) && FlushNeeded(offset)); + } + + /** + * return mask for caller to & in order to set the flush_needed flag. + * can also be used to clear the flag using offset |= ~flush_set_mask(). + */ + inline intptr_t flush_set_mask() const { + intptr_t mask = flush_needed(); + --mask; + return (mask | kFlushNeeded); + } + /** + * static version of flush_set_mask() for the given offset. 
+ */ + static inline intptr_t + flush_set_mask(offset_type offset) { + intptr_t mask = this_type::flush_needed(offset); + --mask; + return (mask | kFlushNeeded); + } + /** * return offset for debug only */ @@ -326,7 +439,7 @@ class pa_self_relative_ptr : public self_relative_ptr_base { operator++() { detail::conditional_add_to_tx(this); - uintptr_t mask = (this->flush_needed() == true); + intptr_t mask = (this->flush_needed() == true); --mask; this->offset = (mask | kFlushNeeded) & ((this->offset | ~kFlushNeeded) @@ -353,7 +466,7 @@ class pa_self_relative_ptr : public self_relative_ptr_base { operator--() { detail::conditional_add_to_tx(this); - uintptr_t mask = (this->flush_needed() == true); + intptr_t mask = (this->flush_needed() == true); --mask; this->offset = (mask | kFlushNeeded) & ((this->offset | ~kFlushNeeded) @@ -380,7 +493,7 @@ class pa_self_relative_ptr : public self_relative_ptr_base { operator+=(std::ptrdiff_t s) { detail::conditional_add_to_tx(this); - uintptr_t mask = (this->flush_needed() == true); + intptr_t mask = (this->flush_needed() == true); --mask; this->offset = (mask | kFlushNeeded) & ((this->offset | ~kFlushNeeded) @@ -395,7 +508,7 @@ class pa_self_relative_ptr : public self_relative_ptr_base { operator-=(std::ptrdiff_t s) { detail::conditional_add_to_tx(this); - uintptr_t mask = (this->flush_needed() == true); + intptr_t mask = (this->flush_needed() == true); --mask; this->offset = (mask | kFlushNeeded) & ((this->offset | ~kFlushNeeded) @@ -403,51 +516,6 @@ class pa_self_relative_ptr : public self_relative_ptr_base { return *this; } - /** - * Conversion to byte pointer - */ - byte_ptr_type - to_byte_pointer() const noexcept - { - return static_cast(this->to_void_pointer()); - } - - /** - * Conversion to void* - */ - void * - to_void_pointer() const noexcept - { - return base_type::offset_to_pointer(this->offset | - ~kFlushNeeded); - } - - /** - * Explicit conversion operator to void* - */ - explicit - operator void *() const noexcept - 
{ - return to_void_pointer(); - } - - /** - * Explicit conversion operator to byte pointer - */ - explicit operator byte_ptr_type() const noexcept - { - return to_byte_pointer(); - } - - /** - * Byte distance between two relative pointers - */ - static difference_type - distance_between(const pa_self_relative_ptr &first, - const pa_self_relative_ptr &second) - { - return second.to_byte_pointer() - first.to_byte_pointer(); - } protected: /** * Verify if element_type is not polymorphic @@ -458,14 +526,94 @@ class pa_self_relative_ptr : public self_relative_ptr_base { static_assert(!std::is_polymorphic::value, "Polymorphic types are not supported"); } + /** + * Conversion to void* use other offset + */ + void * + offset_to_pointer(difference_type other_offset) const noexcept + { + intptr_t mask = other_offset == nullptr_offset; + --mask; + return base_type::offset_to_pointer((other_offset | ~kFlushNeeded) & mask); + } + +// /** +// * Conversion self_relative_ptr_base to offset from itself +// */ +// difference_type +// pointer_to_offset(const pa_self_relative_ptr &ptr) const noexcept +// { +// /* +// This version without branches is vectorization-friendly. +// mask = is_null() should not create a branch in the code. +// In this line, we just assign 0 or 1 to the mask variable. +// +// This code is equal: +// return ptr.is_null() +// ? 
nullptr_offset +// : ptr.offset + this->distance_between_self(ptr); +// */ +// uintptr_t mask = ptr.is_null(); +// --mask; +// difference_type distance_between_self = +// reinterpret_cast(&ptr) - +// reinterpret_cast(this); +// distance_between_self &= +// reinterpret_cast(mask); +// return ptr.offset + distance_between_self; +// } private: + static constexpr difference_type nullptr_offset = 0; difference_type self_offset(element_type *ptr) const noexcept { return base_type::pointer_to_offset(static_cast(ptr)); } + template + friend class pa_self_relative_accessor; }; +// +///** +// * Static class accessor to self_relative_ptr_base +// */ +//template +//class pa_self_relative_accessor { +//public: +// using access_type = pmem::detail::self_relative_ptr_base_impl

; +// using difference_type = typename access_type::difference_type; +// +// template +// static difference_type +// pointer_to_offset(const access_type &obj, PointerType *ptr, bool flush_needed) +// { +// intptr_t mask = (flush_needed == true); +// --mask; +// return ((mask | kFlushNeeded) & obj.pointer_to_offset(static_cast(ptr))); +// } +// +// template +// static PointerType * +// offset_to_pointer(difference_type offset, const access_type &obj) +// { +// intptr_t mask = (offset == access_type::nullptr_offset); +// --mask; +// auto ptr = obj.offset_to_pointer((offset | ~kFlushNeeded) & mask); +// return static_cast(ptr); +// } +// +// static P & +// get_offset(access_type &ptr) +// { +// return ptr.offset; +// } +// +// static const P & +// get_offset(const access_type &ptr) +// { +// return ptr.offset; +// } +//}; /** * Swaps two pa_self_relative_ptr objects of the same type. @@ -736,4 +884,6 @@ operator<<(std::ostream &os, pa_self_relative_ptr const &ptr) } } } + + #endif // LIBPMEMOBJ_CPP_PA_SELF_RELATIVE_PTR_HPP diff --git a/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp b/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp index 9b985d8445..f9dd0e5908 100644 --- a/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp +++ b/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp @@ -23,9 +23,8 @@ #include #include -#include +#include #include - #include namespace pmem @@ -116,6 +115,32 @@ class swmr_skip_list_node { return *get(); } + node_pointer + next(size_type level) + { + assert(level < height()); + node_pointer current = get_next(level).load(std::memory_order_acquire); + if (!current.flush_needed()) { + return current; + } + obj::pool_base pop = get_pool_base(); + node_pointer desired; + while (true) { + desired = node_pointer{current.get(), false}; + if (get_next(level).compare_exchange_weak( + current, desired)) { + auto &node = get_next(level); + pop.persist(&node, sizeof(node)); + break; + } + current = 
get_next(level).load(std::memory_order_acquire); + if (!current.flush_needed()) { + return current; + } + } + return desired; + } + node_pointer next(size_type level) const { @@ -127,23 +152,33 @@ class swmr_skip_list_node { * Can`t be called concurrently * Should be called inside a transaction */ - void - set_next_tx(size_type level, node_pointer next) - { - assert(level < height()); - assert(pmemobj_tx_stage() == TX_STAGE_WORK); - auto &node = get_next(level); - obj::flat_transaction::snapshot(&node); - node.store(next, std::memory_order_release); - } +// void +// set_next_tx(size_type level, node_pointer next) +// { +// assert(level < height()); +// assert(pmemobj_tx_stage() == TX_STAGE_WORK); +// auto &node = get_next(level); +// obj::flat_transaction::snapshot(&node); +// node.store(next, std::memory_order_release); +// } +// +// void +// set_next(obj::pool_base pop, size_type level, node_pointer next) +// { +// assert(level < height()); +// auto &node = get_next(level); +// node.store(next, std::memory_order_release); +// pop.persist(&node, sizeof(node)); +// } void - set_next(obj::pool_base pop, size_type level, node_pointer next) + set_next(size_type level, node_pointer next) { assert(level < height()); +// assert(pmemobj_tx_stage() == TX_STAGE_WORK); auto &node = get_next(level); - node.store(next, std::memory_order_release); - pop.persist(&node, sizeof(node)); +// obj::flat_transaction::snapshot(&node); + node.store(node_pointer{next.get(), true}, std::memory_order_release); } void @@ -153,19 +188,19 @@ class swmr_skip_list_node { auto *nexts = get_nexts(); for (size_type i = 0; i < h; i++) { - nexts[i].store(new_nexts[i], std::memory_order_relaxed); + nexts[i].store(node_pointer{new_nexts[i].get(), true}, std::memory_order_relaxed); } } - void - set_nexts(obj::pool_base pop, const node_pointer *new_nexts, - size_type h) - { - set_nexts(new_nexts, h); - - auto *nexts = get_nexts(); - pop.persist(nexts, sizeof(nexts[0]) * h); - } +// void +// 
set_nexts(obj::pool_base pop, const node_pointer *new_nexts, +// size_type h) +// { +// set_nexts(new_nexts, h); +// +// auto *nexts = get_nexts(); +// pop.persist(nexts, sizeof(nexts[0]) * h); +// } /** @return number of layers */ size_type @@ -201,6 +236,12 @@ class swmr_skip_list_node { reinterpret_cast(this + 1); return arr[level]; } + obj::pool_base + get_pool_base() const + { + PMEMobjpool *pop = pmemobj_pool_by_ptr(this); + return obj::pool_base(pop); + } mutex_type mutex; union { @@ -291,7 +332,7 @@ class swmr_skip_list_iterator { node_ptr node; template - friend class concurrent_skip_list; + friend class swmr_skip_list; template friend bool operator==(const swmr_skip_list_iterator &lhs, @@ -1862,7 +1903,8 @@ class swmr_skip_list { node_ptr head = dummy_head.get(); for (size_type i = 0; i < head->height(); ++i) { - head->set_next_tx(i, nullptr); +// head->set_next_tx(i, nullptr); + head->set_next(i, nullptr); } on_init_size = 0; @@ -2396,7 +2438,8 @@ class swmr_skip_list { assert(tls_entry.insert_stage == not_started); assert(tls_entry.ptr != nullptr); - n->set_nexts(pop, next_nodes.data(), height); +// n->set_nexts(pop, next_nodes.data(), height); + n->set_nexts(next_nodes.data(), height); tls_entry.insert_stage = in_progress; pop.persist(&(tls_entry.insert_stage), @@ -2577,7 +2620,8 @@ class swmr_skip_list { next_nodes[level]); assert(prev_nodes[level]->next(level) == n->next(level)); - prev_nodes[level]->set_next(pop, level, new_node); +// prev_nodes[level]->set_next(pop, level, new_node); + prev_nodes[level]->set_next(level, new_node); } #ifndef NDEBUG @@ -2790,7 +2834,9 @@ class swmr_skip_list { ++level) { assert(prev_nodes[level]->height() > level); assert(next_nodes[level].get() == erase_node); - prev_nodes[level]->set_next_tx(level, +// prev_nodes[level]->set_next_tx(level, +// erase_node->next(level)); + prev_nodes[level]->set_next(level, erase_node->next(level)); } @@ -3079,7 +3125,8 @@ class swmr_skip_list { /* Otherwise, node already linked on * 
this layer */ assert(n->next(level) == next_nodes[level]); - prev_nodes[level]->set_next(pop, level, node); +// prev_nodes[level]->set_next(pop, level, node); + prev_nodes[level]->set_next(level, node); } } @@ -3121,36 +3168,6 @@ class swmr_skip_list { obj::p on_init_size; }; /* class swmr_skip_list */ -template -class map_traits { -public: - static constexpr size_t max_level = MAX_LEVEL; - using random_generator_type = RND_GENERATOR; - using key_type = Key; - using mapped_type = Value; - using compare_type = KeyCompare; - using value_type = pair; - using reference = value_type &; - using const_reference = const value_type &; - using allocator_type = Allocator; - - /** - * pmem::detail::swmr_skip_list allows multimapping. If this flag - * is true we can store multiple entries with the same key. For - * concurrent_map it should be false; For concurrent_multimap it should - * be true; - */ - constexpr static bool allow_multimapping = AllowMultimapping; - - static const key_type & - get_key(const_reference val) - { - return val.first; - } -}; /* class map_traits */ - } /* namespace detail */ } /* namespace pmem */ diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 84f903bca3..e9d8d93a31 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -313,11 +313,20 @@ if(TEST_SELF_RELATIVE_POINTER) build_test(self_relative_ptr_arith ptr/self_relative_ptr_arith.cpp) add_test_generic(NAME self_relative_ptr_arith TRACERS none memcheck pmemcheck) + build_test(pa_self_relative_ptr_arith ptr/pa_self_relative_ptr_arith.cpp) + add_test_generic(NAME pa_self_relative_ptr_arith TRACERS none memcheck pmemcheck) + build_test(self_relative_ptr_atomic ptr/self_relative_ptr_atomic.cpp) add_test_generic(NAME self_relative_ptr_atomic TRACERS none memcheck drd helgrind) + build_test(pa_self_relative_ptr_atomic ptr/pa_self_relative_ptr_atomic.cpp) + add_test_generic(NAME pa_self_relative_ptr_atomic TRACERS none memcheck drd helgrind) + build_test(self_relative_ptr_atomic_pmem 
ptr/self_relative_ptr_atomic_pmem.cpp) add_test_generic(NAME self_relative_ptr_atomic_pmem TRACERS none memcheck pmemcheck drd helgrind) + + build_test(pa_self_relative_ptr_atomic_pmem ptr/pa_self_relative_ptr_atomic_pmem.cpp) + add_test_generic(NAME pa_self_relative_ptr_atomic_pmem TRACERS none memcheck pmemcheck drd helgrind) endif() add_subdirectory(external) @@ -967,6 +976,26 @@ if(TEST_CONCURRENT_MAP) endif() endif() +################################################################################ +################################## SWMR_MAP ############################## +if(TEST_SWMR_MAP) + build_test(swmr_map swmr_map/swmr_map.cpp) + # XXX: Add helgrind tracer for this test when we will fix false-positive with lock order + add_test_generic(NAME swmr_map TRACERS none memcheck pmemcheck drd) + + build_test(swmr_map_singlethread swmr_map/swmr_map_singlethread.cpp) + add_test_generic(NAME swmr_map_singlethread TRACERS none memcheck pmemcheck) + + build_test(swmr_map_tx swmr_map/swmr_map_tx.cpp) + add_test_generic(NAME swmr_map_tx TRACERS none memcheck pmemcheck) + + build_test(swmr_map_insert_reopen swmr_map/swmr_map_insert_reopen.cpp) + add_test_generic(NAME swmr_map_insert_reopen CASE 0 TRACERS none memcheck pmemcheck + SCRIPT concurrent_hash_map/check_is_pmem.cmake) + +endif() + + ################################################################################ #################################### MPSC_QUEUE ################################ if(TEST_MPSC_QUEUE) diff --git a/tests/ptr/ptr.hpp b/tests/ptr/ptr.hpp index a2d1d0eba6..5b5c777e7f 100644 --- a/tests/ptr/ptr.hpp +++ b/tests/ptr/ptr.hpp @@ -4,6 +4,7 @@ #include "unittest.hpp" #include +#include #include #include #include @@ -31,6 +32,12 @@ assert_if_oid_is_null(nvobjexp::self_relative_ptr &f) { } +template <> +void +assert_if_oid_is_null(nvobjexp::pa_self_relative_ptr &f) +{ +} + /* * test_null_ptr -- verifies if the pointer correctly behaves like a * nullptr-value diff --git 
a/tests/swmr_map/pmreorder.conf b/tests/swmr_map/pmreorder.conf new file mode 100644 index 0000000000..3d6748e9cf --- /dev/null +++ b/tests/swmr_map/pmreorder.conf @@ -0,0 +1,6 @@ +{ + "pmemobj_open" : "NoReorderNoCheck", + "pmemobj_close" : "NoReorderNoCheck", + "pmemobj_alloc" : "NoReorderNoCheck", + "pmemobj_xalloc" : "NoReorderNoCheck" +} diff --git a/tests/swmr_map/swmr_map.cpp b/tests/swmr_map/swmr_map.cpp new file mode 100644 index 0000000000..ca5bea5dfe --- /dev/null +++ b/tests/swmr_map/swmr_map.cpp @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright 2020, Intel Corporation */ + +/* + * swmr_map.cpp -- pmem::obj::experimental::swmr_map test + * + */ + +#include "thread_helpers.hpp" +#include "unittest.hpp" + +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#define LAYOUT "swmr_map" + +namespace nvobj = pmem::obj; + +namespace +{ + +struct hetero_less { + using is_transparent = void; + template + bool + operator()(const T1 &lhs, const T2 &rhs) const + { + return lhs < rhs; + } +}; + +typedef nvobj::experimental::swmr_map, nvobj::p> + persistent_map_type_int; + +typedef nvobj::experimental::swmr_map + persistent_map_type_string; + +struct root { + nvobj::persistent_ptr cons1; + nvobj::persistent_ptr cons2; +}; + +std::string +gen_key(persistent_map_type_string &, int i) +{ + return std::to_string(i); +} + +int +gen_key(persistent_map_type_int &, int i) +{ + return i; +} + +template +void +check_sorted(MapType *map) +{ + using value_type = typename MapType::value_type; + UT_ASSERT(std::is_sorted( + map->begin(), map->end(), + [](const value_type &lhs, const value_type &rhs) { + return lhs.first < rhs.first; + })); +} + +/* + * emplace_and_lookup_test -- (internal) test emplace and lookup operations + */ +template +void +emplace_and_lookup_test(nvobj::pool &pop, MapType *map) +{ + const size_t NUMBER_ITEMS_INSERT = 50; + + // Adding more concurrency will increase DRD test time + const size_t 
concurrency = 8; + + size_t TOTAL_ITEMS = NUMBER_ITEMS_INSERT * concurrency; + + UT_ASSERT(map != nullptr); + + map->runtime_initialize(); + + parallel_exec(concurrency, [&](size_t thread_id) { + int begin = thread_id * NUMBER_ITEMS_INSERT; + int end = begin + int(NUMBER_ITEMS_INSERT); + for (int i = begin; i < end; ++i) { + auto ret = map->emplace(gen_key(*map, i), + gen_key(*map, i)); + UT_ASSERT(ret.second == true); + + UT_ASSERT(map->count(gen_key(*map, i)) == 1); + + typename MapType::iterator it = + map->find(gen_key(*map, i)); + UT_ASSERT(it != map->end()); + UT_ASSERT(it->first == gen_key(*map, i)); + UT_ASSERT(it->second == gen_key(*map, i)); + } + + for (int i = begin; i < end; ++i) { + typename MapType::const_iterator it = + map->find(gen_key(*map, i)); + UT_ASSERT(it != map->end()); + UT_ASSERT(it->first == gen_key(*map, i)); + UT_ASSERT(it->second == gen_key(*map, i)); + } + }); + + check_sorted(map); + + UT_ASSERT(map->size() == TOTAL_ITEMS); + + UT_ASSERT(std::distance(map->begin(), map->end()) == int(TOTAL_ITEMS)); + + check_sorted(map); + + map->runtime_initialize(); + + UT_ASSERT(map->size() == TOTAL_ITEMS); + + map->runtime_initialize(); + + UT_ASSERT(map->size() == TOTAL_ITEMS); + + map->clear(); + + UT_ASSERT(map->size() == 0); + + UT_ASSERT(std::distance(map->begin(), map->end()) == 0); +} +/* + * emplace_and_lookup_duplicates_test -- (internal) test emplace and lookup + * operations with duplicates + */ +template +void +emplace_and_lookup_duplicates_test(nvobj::pool &pop, MapType *map) +{ + const size_t NUMBER_ITEMS_INSERT = 50; + + // Adding more concurrency will increase DRD test time + const size_t concurrency = 4; + + UT_ASSERT(map != nullptr); + + map->runtime_initialize(); + + std::vector threads; + threads.reserve(concurrency * 2); + + for (size_t i = 0; i < concurrency; ++i) { + threads.emplace_back([&]() { + for (int i = 0; + i < static_cast(NUMBER_ITEMS_INSERT); ++i) { + map->emplace(gen_key(*map, i), + gen_key(*map, i)); + } + }); 
+ } + + for (size_t i = 0; i < concurrency; ++i) { + threads.emplace_back([&]() { + for (int i = 0; + i < static_cast(NUMBER_ITEMS_INSERT); ++i) { + auto it = map->find(gen_key(*map, i)); + + if (it != map->end()) { + UT_ASSERT(it->first == + gen_key(*map, i)); + UT_ASSERT(it->second == + gen_key(*map, i)); + } + } + }); + } + + for (auto &t : threads) { + t.join(); + } + + check_sorted(map); + + for (auto &e : *map) { + UT_ASSERT(e.first == e.second); + } + + UT_ASSERT(map->size() == NUMBER_ITEMS_INSERT); + + UT_ASSERT(std::distance(map->begin(), map->end()) == + static_cast(NUMBER_ITEMS_INSERT)); + + check_sorted(map); +} +} + +static void +test(int argc, char *argv[]) +{ + if (argc < 2) { + UT_FATAL("usage: %s file-name", argv[0]); + } + + const char *path = argv[1]; + + nvobj::pool pop; + + try { + pop = nvobj::pool::create( + path, LAYOUT, PMEMOBJ_MIN_POOL * 20, S_IWUSR | S_IRUSR); + nvobj::transaction::run(pop, [&] { + pop.root()->cons1 = nvobj::make_persistent< + persistent_map_type_int>(); + pop.root()->cons2 = nvobj::make_persistent< + persistent_map_type_string>(); + }); + } catch (pmem::pool_error &pe) { + UT_FATAL("!pool::create: %s %s", pe.what(), path); + } + + emplace_and_lookup_test(pop, pop.root()->cons1.get()); + emplace_and_lookup_duplicates_test(pop, pop.root()->cons1.get()); + + emplace_and_lookup_test(pop, pop.root()->cons2.get()); + emplace_and_lookup_duplicates_test(pop, pop.root()->cons2.get()); + + nvobj::transaction::run(pop, [&] { + nvobj::delete_persistent( + pop.root()->cons1); + nvobj::delete_persistent( + pop.root()->cons2); + }); + + pop.close(); +} + +int +main(int argc, char *argv[]) +{ + return run_test([&] { test(argc, argv); }); +} diff --git a/tests/swmr_map/swmr_map_insert_reopen.cpp b/tests/swmr_map/swmr_map_insert_reopen.cpp new file mode 100644 index 0000000000..7dcb3ccc9c --- /dev/null +++ b/tests/swmr_map/swmr_map_insert_reopen.cpp @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright 2020, Intel 
Corporation */ + +/* + * swmr_map_insert_reopen.cpp -- pmem::obj::experimental::swmr_map + * test + * + */ + +#include "unittest.hpp" +#include + +template +void +check_size(MapType *map, size_t expected_size) +{ + UT_ASSERT(map->size() == expected_size); + UT_ASSERT(std::distance(map->begin(), map->end()) == + int(expected_size)); +} + +namespace nvobj = pmem::obj; + +typedef nvobj::experimental::swmr_map, nvobj::p> + persistent_map_type; + +struct root { + nvobj::persistent_ptr cons; +}; + +static const std::string LAYOUT = "swmr_map"; + +/* + * insert_reopen_test -- (internal) test insert operations and verify + * consistency after reopen + * pmem::obj::experimental::swmr_map, nvobj::p > + */ +void +insert_reopen_test(nvobj::pool &pop, std::string path, + size_t concurrency = 4) +{ + + using value_type = persistent_map_type::value_type; + PRINT_TEST_PARAMS; + + size_t thread_items = 50; + const size_t expected_size = thread_items * concurrency; + + { + auto map = pop.root()->cons; + + UT_ASSERT(map != nullptr); + + map->runtime_initialize(); + + parallel_exec(concurrency, [&](size_t thread_id) { + int begin = thread_id * thread_items; + int end = begin + int(thread_items); + for (int i = begin; i < end; ++i) { + map->insert(value_type(i, i)); + } + }); + + check_size(map.get(), expected_size); + + map->insert(value_type(expected_size + 1, 1)); + map->unsafe_erase(expected_size + 1); + + pop.close(); + } + + { + pop = nvobj::pool::open(path, LAYOUT); + + auto map = pop.root()->cons; + + UT_ASSERT(map != nullptr); + + map->runtime_initialize(); + + check_size(map.get(), expected_size); + + parallel_exec(concurrency, [&](size_t thread_id) { + int begin = thread_id * thread_items; + int end = begin + int(thread_items); + for (int i = begin; i < end; ++i) { + map->insert( + value_type(i + int(expected_size), i)); + } + }); + + check_size(map.get(), expected_size * 2); + } +} + +static void +test(int argc, char *argv[]) +{ + if (argc < 2) { + UT_FATAL("usage: %s 
file-name", argv[0]); + } + + const char *path = argv[1]; + + nvobj::pool pop; + + try { + pop = nvobj::pool::create( + path, LAYOUT, PMEMOBJ_MIN_POOL * 20, S_IWUSR | S_IRUSR); + pmem::obj::transaction::run(pop, [&] { + pop.root()->cons = + nvobj::make_persistent(); + }); + } catch (pmem::pool_error &pe) { + UT_FATAL("!pool::create: %s %s", pe.what(), path); + } + + size_t concurrency = 8; + if (On_drd) + concurrency = 2; + std::cout << "Running tests for " << concurrency << " threads" + << std::endl; + + insert_reopen_test(pop, path, concurrency); + + pop.close(); +} + +int +main(int argc, char *argv[]) +{ + return run_test([&] { test(argc, argv); }); +} diff --git a/tests/swmr_map/swmr_map_pmreorder_simple.cpp b/tests/swmr_map/swmr_map_pmreorder_simple.cpp new file mode 100644 index 0000000000..ce05438c5c --- /dev/null +++ b/tests/swmr_map/swmr_map_pmreorder_simple.cpp @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright 2020, Intel Corporation */ + +/* + * swmr_map_reorder_simple.cpp -- pmem::obj::experimental::swmr_map + * test + * + */ + +#include "helper_algorithm.hpp" +#include "unittest.hpp" + +#include +#include +#include +#include + +#include +#include + +#include + +#define LAYOUT "persistent_swmr_map" + +namespace nvobj = pmem::obj; + +namespace +{ + +typedef nvobj::experimental::swmr_map, nvobj::p> + persistent_map_type; + +typedef persistent_map_type::value_type value_type; + +struct root { + nvobj::persistent_ptr cons; +}; + +static constexpr int elements[] = {2, 257, 513, 1, 123, 713}; + +static constexpr int len_elements = + static_cast(sizeof(elements) / sizeof(elements[0])); + +/* + * check_exist -- (internal) check existence of an element + */ +void +check_exist(nvobj::persistent_ptr &map, int element) +{ + auto it = map->find(element); + UT_ASSERTeq(it != map->end(), true); + + UT_ASSERTeq(it->first, element); + UT_ASSERTeq(it->second, element); +} + +/* + * test_init -- (internal) init test + * pmem::obj::swmr_map, 
nvobj::p > + */ +void +test_init(nvobj::pool &pop) +{ + auto persistent_map = pop.root()->cons; + persistent_map->runtime_initialize(); + + for (int i = 0; i < len_elements / 2; i++) { + persistent_map->insert(value_type(elements[i], elements[i])); + check_exist(persistent_map, elements[i]); + } +} + +/* + * test_map -- (internal) test + * pmem::obj::experimental::swmr_map, nvobj::p > + */ +void +test_insert(nvobj::pool &pop) +{ + auto persistent_map = pop.root()->cons; + + persistent_map->runtime_initialize(); + + for (int i = len_elements / 2; i < len_elements; i++) { + persistent_map->insert(value_type(elements[i], elements[i])); + check_exist(persistent_map, elements[i]); + } +} + +void +check_consistency(nvobj::pool &pop) +{ + auto persistent_map = pop.root()->cons; + persistent_map->runtime_initialize(); + + auto size = static_cast( + persistent_map->size()); + + UT_ASSERTeq( + std::distance(persistent_map->begin(), persistent_map->end()), + size); + + UT_ASSERT(size <= len_elements); + + for (int i = 0; i < size; i++) { + UT_ASSERTeq(persistent_map->count(elements[i]), 1); + check_exist(persistent_map, elements[i]); + } + + for (int i = size; + i < static_cast(sizeof(elements) / sizeof(elements[0])); + i++) { + UT_ASSERTeq(persistent_map->count(elements[i]), 0); + } + + auto value_comp = [](const value_type &lhs, const value_type &rhs) { + return lhs.first < rhs.first; + }; + std::is_sorted(persistent_map->begin(), persistent_map->end(), + value_comp); + + is_strictly_increased(persistent_map->begin(), persistent_map->end(), + value_comp); +} +} + +static void +test(int argc, char *argv[]) +{ + if (argc != 3 || strchr("coi", argv[1][0]) == nullptr) + UT_FATAL("usage: %s file-name", argv[0]); + + const char *path = argv[2]; + + nvobj::pool pop; + + try { + if (argv[1][0] == 'o') { + pop = nvobj::pool::open(path, LAYOUT); + + check_consistency(pop); + } else if (argv[1][0] == 'c') { + pop = nvobj::pool::create(path, LAYOUT, + PMEMOBJ_MIN_POOL * 20, + S_IWUSR | 
S_IRUSR); + + pmem::obj::transaction::run(pop, [&] { + pop.root()->cons = nvobj::make_persistent< + persistent_map_type>(); + }); + + test_init(pop); + } else if (argv[1][0] == 'i') { + pop = nvobj::pool::open(path, LAYOUT); + + test_insert(pop); + } + } catch (pmem::pool_error &pe) { + UT_FATAL("!pool::create: %s %s", pe.what(), path); + } + + pop.close(); +} + +int +main(int argc, char *argv[]) +{ + return run_test([&] { test(argc, argv); }); +} diff --git a/tests/swmr_map/swmr_map_singlethread.cpp b/tests/swmr_map/swmr_map_singlethread.cpp new file mode 100644 index 0000000000..40eb4f6286 --- /dev/null +++ b/tests/swmr_map/swmr_map_singlethread.cpp @@ -0,0 +1,701 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright 2020, Intel Corporation */ + +/* + * swmr_map_singlethread.cpp -- pmem::obj::experimental::swmr_map + * basic tests + * + */ + +#include "unittest.hpp" + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#define LAYOUT "swmr_map" + +namespace nvobj = pmem::obj; + +namespace +{ + +typedef nvobj::experimental::swmr_map, nvobj::p> + persistent_map_type; + +typedef persistent_map_type::value_type value_type; + +struct move_element { + move_element(int val) noexcept : val(val) + { + } + + move_element(const move_element &) = delete; + + move_element &operator=(const move_element &) = delete; + + move_element(move_element &&e) noexcept : val(e.val) + { + e.val = 0; + } + + move_element & + operator=(move_element &&e) + { + val = e.val; + e.val = 0; + return *this; + } + + nvobj::p val; +}; + +typedef nvobj::experimental::swmr_map, move_element> + persistent_map_move_type; + +typedef persistent_map_move_type::value_type value_move_type; + +struct hetero_less { + using is_transparent = void; + template + bool + operator()(const T1 &lhs, const T2 &rhs) const + { + return lhs < rhs; + } +}; + +inline std::string +gen_hetero(int v) +{ + return std::to_string(v); +} + +typedef 
nvobj::experimental::swmr_map + persistent_map_string_type; + +struct root { + nvobj::persistent_ptr map1; + nvobj::persistent_ptr map2; + + nvobj::persistent_ptr map_move; + + nvobj::persistent_ptr map_string; +}; + +void +verify_elements(persistent_map_type &map, size_t elements) +{ + UT_ASSERT(map.size() == elements); + + for (int i = 0; i < static_cast(elements); i++) { + UT_ASSERT(map.count(i) == 1); + } +} + +/** + * Wrapper around PMDK allocator + * @throw std::bad_alloc on allocation failure. + */ +template +void +tx_alloc_wrapper(nvobj::pool_base &pop, nvobj::persistent_ptr &ptr, + Args &&... args) +{ + try { + nvobj::transaction::manual tx(pop); + ptr = nvobj::make_persistent(std::forward(args)...); + nvobj::transaction::commit(); + } catch (...) { + throw std::bad_alloc(); + } +} + +/* + * swap_test -- (internal) test swap method + * pmem::obj::swmr_map, nvobj::p > + */ +void +swap_test(nvobj::pool &pop) +{ + auto &map1 = pop.root()->map1; + auto &map2 = pop.root()->map2; + + tx_alloc_wrapper(pop, map1); + tx_alloc_wrapper(pop, map2); + + for (int i = 0; i < 50; i++) { + auto ret = map1->insert(value_type(i, i)); + UT_ASSERT(ret.second == true); + } + + for (int i = 0; i < 300; i++) { + auto ret = map2->insert(value_type(i, i)); + UT_ASSERT(ret.second == true); + } + + map1->swap(*map2); + verify_elements(*map1, 300); + verify_elements(*map2, 50); + + pmem::detail::destroy(*map1); + pmem::detail::destroy(*map2); +} + +/* + * access_test -- (internal) test access methods + * pmem::obj::swmr_map, nvobj::p > + */ +void +access_test(nvobj::pool &pop) +{ + auto &map1 = pop.root()->map1; + + tx_alloc_wrapper(pop, map1); + + for (int i = 0; i < 100; i++) { + auto ret = map1->insert(value_type(i, i)); + UT_ASSERT(ret.second == true); + } + + UT_ASSERT(map1->begin() == + const_cast(map1.get())->begin()); + UT_ASSERT(map1->end() == + const_cast(map1.get())->end()); + + int i = 0; + auto it = map1->begin(); + auto const_it = + const_cast(map1.get())->begin(); + 
while (it != map1->end()) { + UT_ASSERT(it->first == const_it->first); + UT_ASSERT(it->second == const_it->second); + + i++; + it++; + const_it++; + } + + UT_ASSERT(static_cast(i) == map1->size()); + + pmem::detail::destroy(*map1); +} + +/* + * insert_test -- (internal) test insert methods + * pmem::obj::swmr_map, nvobj::p > + */ +void +insert_test(nvobj::pool &pop) +{ + auto &map1 = pop.root()->map1; + auto &map_move = pop.root()->map_move; + + tx_alloc_wrapper(pop, map1); + + { + auto ret = map1->insert(value_type(1, 1)); + UT_ASSERTeq(ret.second, true); + UT_ASSERTeq(ret.first->first, 1); + UT_ASSERTeq(ret.first->second, 1); + } + + { + auto ret = map1->insert(value_type(2, 2)); + UT_ASSERTeq(ret.second, true); + UT_ASSERTeq(ret.first->first, 2); + UT_ASSERTeq(ret.first->second, 2); + } + + tx_alloc_wrapper(pop, map_move); + + { + value_move_type e(3, 3); + + auto ret = map_move->insert(std::move(e)); + UT_ASSERTeq(ret.second, true); + UT_ASSERTeq(ret.first->first, 3); + UT_ASSERTeq(ret.first->second.val, 3); + } + + { + value_move_type e(4, 4); + + auto ret = map_move->insert(std::move(e)); + UT_ASSERTeq(ret.second, true); + UT_ASSERTeq(ret.first->first, 4); + UT_ASSERTeq(ret.first->second.val, 4); + } + + { + value_move_type e(5, 5); + + auto ret = map_move->insert(std::move(e)); + UT_ASSERTeq(ret.second, true); + + using const_iterator = + typename persistent_map_move_type::const_iterator; + const_iterator it = map_move->find(5); + + UT_ASSERT(it != map_move->end()); + + UT_ASSERTeq(it->first, 5); + UT_ASSERTeq(it->second.val, 5); + } + + { + value_move_type e(6, 6); + + auto ret = map_move->insert(std::move(e)); + UT_ASSERTeq(ret.second, true); + + using iterator = typename persistent_map_move_type::iterator; + iterator it = map_move->find(6); + + UT_ASSERT(it != map_move->end()); + + UT_ASSERTeq(it->first, 6); + UT_ASSERTeq(it->second.val, 6); + } + + { + std::vector v = {value_type(11, 11), + value_type(12, 12), + value_type(13, 13)}; + + 
map1->insert(v.begin(), v.end()); + + for (auto &e : v) + UT_ASSERTeq(map1->count(e.first), 1); + } + + { + map1->insert(std::initializer_list{ + value_type(21, 21), value_type(22, 22)}); + + UT_ASSERTeq(map1->count(21), 1); + UT_ASSERTeq(map1->count(22), 1); + } + + pmem::detail::destroy(*map1); + pmem::detail::destroy(*map_move); +} + +/* + * emplace_test -- (internal) test emplace method + */ +void +emplace_test(nvobj::pool &pop) +{ + auto &map = pop.root()->map1; + auto &map_move = pop.root()->map_move; + auto &map_string = pop.root()->map_string; + + tx_alloc_wrapper(pop, map); + + { + auto ret = map->emplace(value_type(1, 1)); + UT_ASSERTeq(ret.second, true); + UT_ASSERTeq(ret.first->first, 1); + UT_ASSERTeq(ret.first->second, 1); + } + + { + auto ret = map->emplace(value_type(1, 2)); + UT_ASSERTeq(ret.second, false); + UT_ASSERTeq(ret.first->first, 1); + UT_ASSERTeq(ret.first->second, 1); + } + + { + auto ret = map->emplace(2, 2); + UT_ASSERTeq(ret.second, true); + UT_ASSERTeq(ret.first->first, 2); + UT_ASSERTeq(ret.first->second, 2); + } + + { + auto ret = map->emplace(2, 3); + UT_ASSERTeq(ret.second, false); + UT_ASSERTeq(ret.first->first, 2); + UT_ASSERTeq(ret.first->second, 2); + } + + tx_alloc_wrapper(pop, map_move); + + { + value_move_type e(3, 3); + + auto ret = map_move->emplace(std::move(e)); + UT_ASSERTeq(ret.second, true); + UT_ASSERTeq(ret.first->first, 3); + UT_ASSERTeq(ret.first->second.val, 3); + } + + { + move_element m(4); + auto ret = map_move->emplace(4, std::move(m)); + UT_ASSERTeq(ret.second, true); + UT_ASSERTeq(ret.first->first, 4); + UT_ASSERTeq(ret.first->second.val, 4); + UT_ASSERTeq(m.val, 0); // m was moved + } + + { + move_element m(5); + auto ret = map_move->try_emplace(4, std::move(m)); + UT_ASSERTeq(ret.second, false); + UT_ASSERTeq(ret.first->first, 4); + UT_ASSERTeq(ret.first->second.val, 4); + UT_ASSERTeq(m.val, 5); // m was not moved + } + + tx_alloc_wrapper(pop, map_string); + { + std::string str("foo"); + auto ret = 
map_string->emplace(str, str); + UT_ASSERTeq(ret.second, true); + UT_ASSERT(ret.first->first == str); + UT_ASSERT(ret.first->second == str); + } + + { + auto ret = map_string->emplace("bar", "bar"); + UT_ASSERTeq(ret.second, true); + UT_ASSERT(ret.first->first == "bar"); + UT_ASSERT(ret.first->second == "bar"); + } + + { + std::string str("foo\0bar", 7); + auto ret = map_string->emplace( + std::piecewise_construct, + std::forward_as_tuple(str.c_str(), str.size()), + std::forward_as_tuple(str.c_str(), str.size())); + UT_ASSERTeq(ret.second, true); + UT_ASSERT(ret.first->first == str); + UT_ASSERT(ret.first->second == str); + } + + { + std::string str("bar\0foo", 7); + auto ret = + map_string->try_emplace(str, str.c_str(), str.size()); + UT_ASSERTeq(ret.second, true); + UT_ASSERT(ret.first->first == str); + UT_ASSERT(ret.first->second == str); + + ret = map_string->try_emplace(str, "new_value"); + UT_ASSERTeq(ret.second, false); + UT_ASSERT(ret.first->first == str); + UT_ASSERT(ret.first->second == str); + } + + pmem::detail::destroy(*map); + pmem::detail::destroy(*map_move); + pmem::detail::destroy(*map_string); +} + +template +void +bound_helper(nvobj::persistent_ptr &m) +{ + using iterator = + typename std::conditional::type; + + using map_ptr_type = + typename std::conditional::type; + map_ptr_type map = m.get(); + + for (int i = 0; i < 298; ++i) { + iterator lb_it = map->lower_bound(i); + iterator ub_it = map->upper_bound(i); + UT_ASSERT(lb_it != map->end()); + UT_ASSERT(ub_it != map->end()); + + std::pair range = map->equal_range(i); + + if (i % 2 == 0) { + UT_ASSERT(lb_it != ub_it); + UT_ASSERTeq(lb_it->first, i); + UT_ASSERTeq(lb_it->second, i); + UT_ASSERTeq(ub_it->first, i + 2); + UT_ASSERTeq(ub_it->second, i + 2); + + UT_ASSERTeq(std::distance(range.first, range.second), + 1); + UT_ASSERTeq(range.first->first, i); + UT_ASSERTeq(range.first->second, i); + UT_ASSERTeq(range.second->first, i + 2); + UT_ASSERTeq(range.second->second, i + 2); + } else { + 
UT_ASSERT(lb_it == ub_it); + UT_ASSERTeq(lb_it->first, i + 1); + UT_ASSERTeq(lb_it->second, i + 1); + UT_ASSERTeq(ub_it->first, i + 1); + UT_ASSERTeq(ub_it->second, i + 1); + + UT_ASSERTeq(std::distance(range.first, range.second), + 0); + UT_ASSERTeq(range.first->first, i + 1); + UT_ASSERTeq(range.first->second, i + 1); + UT_ASSERTeq(range.second->first, i + 1); + UT_ASSERTeq(range.second->second, i + 1); + } + } + + iterator lb_it = map->lower_bound(298); + UT_ASSERT(lb_it != map->end()); + UT_ASSERTeq(lb_it->first, 298); + UT_ASSERTeq(lb_it->second, 298); + + iterator ub_it = map->upper_bound(298); + UT_ASSERT(ub_it == map->end()); + + lb_it = map->lower_bound(300); + UT_ASSERT(lb_it == map->end()); + + ub_it = map->upper_bound(300); + UT_ASSERT(ub_it == map->end()); + + lb_it = map->lower_bound(-1); + UT_ASSERT(lb_it == map->begin()); + UT_ASSERTeq(lb_it->first, 0); + UT_ASSERTeq(lb_it->second, 0); + + ub_it = map->upper_bound(-1); + UT_ASSERT(ub_it == map->begin()); + UT_ASSERTeq(ub_it->first, 0); + UT_ASSERTeq(ub_it->second, 0); + + std::pair range = map->equal_range(-1); + UT_ASSERTeq(std::distance(range.first, range.second), 0); + UT_ASSERT(range.first == range.second); + UT_ASSERT(range.first == map->begin()); + + range = map->equal_range(298); + UT_ASSERTeq(std::distance(range.first, range.second), 1); + UT_ASSERT(range.first != map->end()); + UT_ASSERT(range.second == map->end()); + UT_ASSERTeq(range.first->first, 298); + UT_ASSERTeq(range.first->second, 298); + + range = map->equal_range(300); + UT_ASSERTeq(std::distance(range.first, range.second), 0); + UT_ASSERT(range.first == map->end()); + UT_ASSERT(range.second == map->end()); +} + +/* + * bound_test -- (internal) test upper_bound, lower_bound and equal_range + * methods pmem::obj::swmr_map, nvobj::p > + */ +void +bound_test(nvobj::pool &pop) +{ + auto &map1 = pop.root()->map1; + + tx_alloc_wrapper(pop, map1); + + for (int i = 0; i < 300; i += 2) { + auto ret = map1->insert(value_type(i, i)); + 
UT_ASSERT(ret.second == true); + } + + bound_helper(map1); + bound_helper(map1); + + pmem::detail::destroy(*map1); +} + +/* + * erase_test -- (internal) test erase methods + * pmem::obj::swmr_map, nvobj::p > + */ +void +erase_test(nvobj::pool &pop) +{ + auto &map1 = pop.root()->map1; + + tx_alloc_wrapper(pop, map1); + + for (int i = 0; i < 300; ++i) { + auto ret = map1->insert(value_type(i, i)); + UT_ASSERT(ret.second == true); + } + + auto last = map1->find(150); + UT_ASSERT(last != map1->end()); + + auto ret = map1->unsafe_erase(map1->begin(), last); + + UT_ASSERT(map1->begin() == ret); + UT_ASSERTeq(150, std::distance(map1->begin(), map1->end())); + + for (int i = 0; i < 150; ++i) { + UT_ASSERT(map1->contains(i) == false); + } + + for (int i = 150; i < 300; ++i) { + UT_ASSERT(map1->contains(i)); + } + + UT_ASSERTeq(map1->unsafe_erase(150), size_t(1)); + UT_ASSERT(map1->find(150) == map1->end()); + + persistent_map_type::const_iterator const_it = map1->find(151); + UT_ASSERT(const_it != map1->end()); + persistent_map_type::iterator it = map1->unsafe_erase(const_it); + UT_ASSERT(map1->contains(151) == false); + UT_ASSERT(it != map1->end()); + UT_ASSERTeq(it->first, 152); + UT_ASSERTeq(it->second, 152); + it = map1->unsafe_erase(it); + UT_ASSERT(map1->contains(152) == false); + UT_ASSERT(it != map1->end()); + UT_ASSERTeq(it->first, 153); + UT_ASSERTeq(it->second, 153); + + pmem::detail::destroy(*map1); +} + +template +void +hetero_helper(nvobj::persistent_ptr &m) +{ + using iterator = + typename std::conditional::type; + + using map_ptr_type = + typename std::conditional::type; + + using value_type = typename MapType::value_type; + + map_ptr_type map = m.get(); + + UT_ASSERT(std::is_sorted( + map->begin(), map->end(), + [](const value_type &lhs, const value_type &rhs) { + return lhs.first < rhs.first; + })); + + for (int i = 0; i < 100; ++i) { + UT_ASSERT(map->contains(gen_hetero(i))); + } + + for (int i = 0; i < 100; ++i) { + iterator it = 
map->find(gen_hetero(i)); + + UT_ASSERT(it != map->end()); + UT_ASSERT(it->first == gen_hetero(i)); + UT_ASSERT(it->second == gen_hetero(i)); + } + + for (int i = 0; i < 99; ++i) { + iterator lb_it = map->lower_bound(gen_hetero(i)); + iterator ub_it = map->upper_bound(gen_hetero(i)); + UT_ASSERT(lb_it != map->end()); + UT_ASSERT(ub_it != map->end()); + UT_ASSERT(lb_it != ub_it); + UT_ASSERT(lb_it->first == gen_hetero(i)); + UT_ASSERT(lb_it->second == gen_hetero(i)); + + // make sure that ub_it points to the next after lb_it + UT_ASSERTeq(std::distance(lb_it, ub_it), 1); + + UT_ASSERT(ub_it->first == ub_it->second); + + std::pair range = + map->equal_range(gen_hetero(i)); + UT_ASSERTeq(std::distance(range.first, range.second), 1); + UT_ASSERT(range.first->first == gen_hetero(i)); + UT_ASSERT(range.first->second == gen_hetero(i)); + UT_ASSERT(range.second->first == range.second->second); + } +} + +/* + * hetero_test -- (internal) test heterogeneous contains/count/find/erase + * methods pmem::obj::swmr_map > + */ +void +hetero_test(nvobj::pool &pop) +{ + auto &map = pop.root()->map_string; + + tx_alloc_wrapper(pop, map); + + for (int i = 0; i < 100; ++i) { + map->emplace(gen_hetero(i), gen_hetero(i)); + } + + hetero_helper(map); + hetero_helper(map); + + for (int i = 0; i < 100; ++i) { + UT_ASSERTeq(map->unsafe_erase(gen_hetero(i)), 1); + } + + for (int i = 0; i < 100; ++i) { + UT_ASSERTeq(map->count(gen_hetero(i)), 0); + } + + for (int i = 0; i < 100; ++i) { + UT_ASSERT(map->contains(gen_hetero(i)) == false); + } + + pmem::detail::destroy(*map); +} +} + +static void +test(int argc, char *argv[]) +{ + if (argc < 2) { + UT_FATAL("usage: %s file-name", argv[0]); + } + + const char *path = argv[1]; + + nvobj::pool pop; + + try { + pop = nvobj::pool::create( + path, LAYOUT, PMEMOBJ_MIN_POOL * 20, S_IWUSR | S_IRUSR); + } catch (pmem::pool_error &pe) { + UT_FATAL("!pool::create: %s %s", pe.what(), path); + } + + access_test(pop); + swap_test(pop); + insert_test(pop); + 
emplace_test(pop); + bound_test(pop); + erase_test(pop); + hetero_test(pop); + + pop.close(); +} + +int +main(int argc, char *argv[]) +{ + return run_test([&] { test(argc, argv); }); +} diff --git a/tests/swmr_map/swmr_map_tx.cpp b/tests/swmr_map/swmr_map_tx.cpp new file mode 100644 index 0000000000..e618e3c208 --- /dev/null +++ b/tests/swmr_map/swmr_map_tx.cpp @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright 2019-2020, Intel Corporation */ + +/* + * concurrent_hash_map_tx.cpp -- pmem::obj::experimental::swmr_map test + * + */ + +#include "unittest.hpp" + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +#define LAYOUT "swmr_map" + +namespace nvobj = pmem::obj; + +typedef nvobj::experimental::swmr_map, nvobj::p> + persistent_map_type; + +struct root { + nvobj::persistent_ptr map; + nvobj::persistent_ptr map2; +}; + +/* + * It verifies that f() throws transaction_scope_error exception. + */ +void +assert_tx_exception(std::function f) +{ + bool exception_thrown = false; + try { + f(); + UT_ASSERT(0); + } catch (pmem::transaction_scope_error &) { + exception_thrown = true; + } catch (std::exception &e) { + UT_FATALexc(e); + } + + UT_ASSERT(exception_thrown); +} + +void +test_tx_exception(nvobj::pool &pop) +{ + pmem::obj::transaction::run(pop, [&] { + pop.root()->map = nvobj::make_persistent(); + }); + + using value_type = typename persistent_map_type::value_type; + using key_type = typename persistent_map_type::key_type; + + auto map = pop.root()->map; + + map->runtime_initialize(); + + pmem::obj::transaction::run(pop, [&] { + value_type v(0, 0); + assert_tx_exception([&] { (void)map->insert(v); }); + + assert_tx_exception( + [&] { (void)map->insert(std::pair(0, 0)); }); + + assert_tx_exception( + [&] { (void)map->insert(value_type(0, 0)); }); + + assert_tx_exception([&] { + (void)map->insert(map->end(), value_type(0, 0)); + }); + + assert_tx_exception([&] { + (void)map->insert(map->end(), + 
std::pair(0, 0)); + }); + + std::array arr{ + persistent_map_type::value_type{0, 0}, + persistent_map_type::value_type{1, 1}}; + + assert_tx_exception( + [&] { (void)map->insert(arr.begin(), arr.end()); }); + + assert_tx_exception([&] { + (void)map->insert( + {persistent_map_type::value_type{0, 0}, + persistent_map_type::value_type{1, 1}}); + }); + + assert_tx_exception([&] { (void)map->emplace(0, 0); }); + + assert_tx_exception( + [&] { (void)map->emplace_hint(map->end(), 0, 0); }); + + key_type k(0); + assert_tx_exception([&] { (void)map->try_emplace(k, 0); }); + + assert_tx_exception( + [&] { (void)map->try_emplace(key_type(0), 0); }); + + assert_tx_exception([&] { (void)map->try_emplace(0, 0); }); + + assert_tx_exception([&] { (void)map->unsafe_erase(0); }); + + assert_tx_exception( + [&] { (void)map->unsafe_erase(map->begin()); }); + + assert_tx_exception([&] { + (void)map->unsafe_erase(map->begin(), map->end()); + }); + }); + + pmem::obj::transaction::run(pop, [&] { + pmem::obj::delete_persistent(map); + }); +} + +void +verify_elements(nvobj::pool &pop, int number_of_inserts) +{ + auto map = pop.root()->map; + auto map2 = pop.root()->map2; + + for (int i = 0; i < number_of_inserts; i++) { + auto it = map->find(i); + auto it2 = map2->find(i); + + UT_ASSERT(it->second == i); + UT_ASSERT(it2->second == i + 1); + } +} + +static void +test(int argc, char *argv[]) +{ + if (argc < 2) { + UT_FATAL("usage: %s file-name", argv[0]); + } + + const char *path = argv[1]; + + nvobj::pool pop; + + try { + pop = nvobj::pool::create( + path, LAYOUT, PMEMOBJ_MIN_POOL * 20, S_IWUSR | S_IRUSR); + } catch (pmem::pool_error &pe) { + UT_FATAL("!pool::create: %s %s", pe.what(), path); + } + + test_tx_exception(pop); + + pop.close(); +} + +int +main(int argc, char *argv[]) +{ + return run_test([&] { test(argc, argv); }); +} From e4f4cc45023b4e660560bd64b41d2d8aba722cb3 Mon Sep 17 00:00:00 2001 From: Yang Jun 01 Date: Thu, 22 Jul 2021 17:47:54 +0800 Subject: [PATCH 03/34] Add new 
pa_self_relative_ptr (persistent-aware self_relative_ptr) and swmr_map Add some related tests --- tests/ptr/pa_self_relative_ptr_arith.cpp | 38 +++ tests/ptr/pa_self_relative_ptr_atomic.cpp | 10 + tests/ptr/pa_self_relative_ptr_atomic.hpp | 308 ++++++++++++++++++ .../ptr/pa_self_relative_ptr_atomic_pmem.cpp | 94 ++++++ 4 files changed, 450 insertions(+) create mode 100644 tests/ptr/pa_self_relative_ptr_arith.cpp create mode 100644 tests/ptr/pa_self_relative_ptr_atomic.cpp create mode 100644 tests/ptr/pa_self_relative_ptr_atomic.hpp create mode 100644 tests/ptr/pa_self_relative_ptr_atomic_pmem.cpp diff --git a/tests/ptr/pa_self_relative_ptr_arith.cpp b/tests/ptr/pa_self_relative_ptr_arith.cpp new file mode 100644 index 0000000000..b424d5f0ae --- /dev/null +++ b/tests/ptr/pa_self_relative_ptr_arith.cpp @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright 2021, 4Paradigm Inc. */ + +#include "ptr_arith.hpp" + +#include +#include + +#define LAYOUT "cpp" + +static void +test(int argc, char *argv[]) +{ + if (argc != 2) + UT_FATAL("usage: %s file-name", argv[0]); + + const char *path = argv[1]; + + nvobj::pool_base pop; + + try { + pop = nvobj::pool_base::create(path, LAYOUT, PMEMOBJ_MIN_POOL, + S_IWUSR | S_IRUSR); + } catch (pmem::pool_error &pe) { + UT_FATAL("!pool::create: %s %s", pe.what(), path); + } + + test_arith(pop); + test_relational(pop); + + pop.close(); +} + +int +main(int argc, char *argv[]) +{ + return run_test([&] { test(argc, argv); }); +} diff --git a/tests/ptr/pa_self_relative_ptr_atomic.cpp b/tests/ptr/pa_self_relative_ptr_atomic.cpp new file mode 100644 index 0000000000..ecaa9a7e70 --- /dev/null +++ b/tests/ptr/pa_self_relative_ptr_atomic.cpp @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright 2021, 4Paradigm Inc. 
*/ + +#include "pa_self_relative_ptr_atomic.hpp" + +int +main(int argc, char *argv[]) +{ + return run_test([&] { test(argc, argv); }); +} diff --git a/tests/ptr/pa_self_relative_ptr_atomic.hpp b/tests/ptr/pa_self_relative_ptr_atomic.hpp new file mode 100644 index 0000000000..229f15d339 --- /dev/null +++ b/tests/ptr/pa_self_relative_ptr_atomic.hpp @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright 2021, 4Paradigm Inc. */ + +#include "thread_helpers.hpp" +#include "unittest.hpp" + +#include +#include + +#include +#include +#include + +constexpr size_t CONCURRENCY = 20; +constexpr size_t MEAN_CONCURRENCY = CONCURRENCY * 2; +constexpr size_t HIGH_CONCURRENCY = CONCURRENCY * 5; + +using pmem::obj::experimental::pa_self_relative_ptr; + +template +using atomic_type = typename std::conditional< + need_volatile, + typename std::add_volatile>>::type, + std::atomic>>::type; + +template +void +test_fetch() +{ + constexpr size_t count_iterations = 300; + constexpr size_t arr_size = CONCURRENCY * count_iterations; + std::vector vptr(arr_size, 0); + + atomic_type ptr{vptr.data()}; + + parallel_exec(CONCURRENCY, [&](size_t) { + for (size_t i = 0; i < count_iterations; ++i) { + auto element = ptr.fetch_add(1); + *element += 1; + } + }); + + UT_ASSERT(vptr.data() + arr_size == ptr.load().get()); + for (auto element : vptr) { + UT_ASSERTeq(element, 1); + } + + parallel_exec(CONCURRENCY, [&](size_t) { + for (size_t i = 0; i < count_iterations; ++i) { + auto element = ptr.fetch_sub(1) - 1; + *element += 1; + } + }); + + UT_ASSERT(vptr.data() == ptr.load().get()); + for (auto element : vptr) { + UT_ASSERTeq(element, 2); + } + + parallel_exec(CONCURRENCY, [&](size_t) { + for (size_t i = 0; i < count_iterations; ++i) { + auto element = ptr++; + *element += 1; + } + }); + + UT_ASSERT(vptr.data() + arr_size == ptr.load().get()); + for (auto element : vptr) { + UT_ASSERTeq(element, 3); + } + + parallel_exec(CONCURRENCY, [&](size_t) { + for (size_t i = 0; i < 
count_iterations; ++i) { + auto element = --ptr; + *element += 1; + } + }); + + UT_ASSERT(vptr.data() == ptr.load().get()); + for (auto element : vptr) { + UT_ASSERTeq(element, 4); + } + + parallel_exec(CONCURRENCY, [&](size_t) { + for (size_t i = 0; i < count_iterations; ++i) { + auto element = ++ptr - 1; + *element += 1; + } + }); + + UT_ASSERT(vptr.data() + arr_size == ptr.load().get()); + for (auto element : vptr) { + UT_ASSERTeq(element, 5); + } + + parallel_exec(CONCURRENCY, [&](size_t) { + for (size_t i = 0; i < count_iterations; ++i) { + auto element = ptr-- - 1; + *element += 1; + } + }); + + UT_ASSERT(vptr.data() == ptr.load().get()); + for (auto element : vptr) { + UT_ASSERTeq(element, 6); + } + + parallel_exec(CONCURRENCY, [&](size_t) { + for (size_t i = 0; i < count_iterations; ++i) { + auto element = (ptr += 1) - 1; + *element += 1; + } + }); + + UT_ASSERT(vptr.data() + arr_size == ptr.load().get()); + for (auto element : vptr) { + UT_ASSERTeq(element, 7); + } + + parallel_exec(CONCURRENCY, [&](size_t) { + for (size_t i = 0; i < count_iterations; ++i) { + auto element = (ptr -= 1); + *element += 1; + } + }); + + UT_ASSERT(vptr.data() == ptr.load().get()); + for (auto element : vptr) { + UT_ASSERTeq(element, 8); + } +} + +template +void +test_exchange() +{ + pa_self_relative_ptr first = reinterpret_cast(uintptr_t{0}); + pa_self_relative_ptr second = reinterpret_cast(~uintptr_t{0}); + + atomic_type ptr; + + UT_ASSERT(ptr.load(std::memory_order_acquire).is_null()); + + ptr.store(first, std::memory_order_release); + + UT_ASSERT(ptr.load() == first); + + auto before_ptr = ptr.exchange(second, std::memory_order_acq_rel); + + UT_ASSERT(ptr.load(std::memory_order_acquire) == second); + + parallel_exec(MEAN_CONCURRENCY, [&](size_t i) { + for (size_t j = 0; j < 1000000; j++) { + auto before = ptr.exchange(i % 2 == 0 ? 
first : second, + std::memory_order_acq_rel); + UT_ASSERT(before == first || before == second); + } + }); + + auto last_ptr = ptr.load(); + UT_ASSERT(last_ptr == first || last_ptr == second); +} + +template +void +test_compare_exchange() +{ + int *first = reinterpret_cast(uintptr_t{0}); + int *second = reinterpret_cast(~uintptr_t{0}); + atomic_type atomic_ptr{first}; + std::atomic exchanged(0); + + parallel_exec(CONCURRENCY, [&](size_t) { + // tst_val != atomic_ptr ==> tst_val is modified + // tst_val == atomic_ptr ==> atomic_ptr is modified + + pa_self_relative_ptr tst_val{first}, new_val{second}; + if (atomic_ptr.compare_exchange_strong(tst_val, new_val)) { + ++exchanged; + } else { + UT_ASSERT(tst_val == new_val); + } + }); + + UT_ASSERTeq(exchanged.load(), 1); + UT_ASSERT(atomic_ptr.load().get() == second); + + atomic_ptr = first; + parallel_exec(CONCURRENCY, [&](size_t) { + // tst_val != atomic_ptr ==> tst_val is modified + // tst_val == atomic_ptr ==> atomic_ptr is modified + + pa_self_relative_ptr tst_val{first}, new_val{second}; + if (atomic_ptr.compare_exchange_strong( + tst_val, new_val, std::memory_order_acquire, + std::memory_order_relaxed)) { + ++exchanged; + } else { + UT_ASSERT(tst_val == new_val); + } + }); + + UT_ASSERTeq(exchanged.load(), 2); + UT_ASSERT(atomic_ptr.load().get() == second); +} + +/** + * Small lock free stack for tests + */ +template +class test_stack { +public: + struct node; + + using value_type = size_t; + using node_ptr_type = pa_self_relative_ptr; + + struct node { + size_t value; + node_ptr_type next; + }; + + void + push(const value_type &data) + { + node_ptr_type new_node = new node{data, nullptr}; + + auto next_node = head.load(std::memory_order_acquire); + + while (!head.compare_exchange_weak(next_node, new_node, + weak_args...)) + ; // empty + new_node->next = next_node; + } + + std::vector + get_all() + { + auto current_node = head.load(); + std::vector values; + while (current_node != nullptr) { + 
values.push_back(current_node->value); + current_node = current_node->next; + } + return values; + } + + ~test_stack() + { + auto current_node = head.load(); + while (current_node != nullptr) { + auto prev_node = current_node.get(); + current_node = current_node->next; + delete prev_node; + } + } + +private: + atomic_type head; +}; + +template +void +test_stack_based_on_atomic() +{ + test_stack stack; + constexpr size_t count_iterations = 1000; + parallel_exec(HIGH_CONCURRENCY, [&](size_t i) { + for (size_t j = 0; j < count_iterations; j++) { + stack.push(j + (i * count_iterations)); + } + }); + auto all = stack.get_all(); + std::sort(all.begin(), all.end()); + for (size_t i = 0; i < HIGH_CONCURRENCY * count_iterations; i++) { + UT_ASSERTeq(all[i], i); + } +} + +template +void +test_the_stack() +{ + test_stack_based_on_atomic(); + test_stack_based_on_atomic(); +} + +template +void +test_is_lock_free() +{ + atomic_type a; + ((void)(a.is_lock_free())); + ((void)(std::atomic_is_lock_free(&a))); +} + +template +void +test(int argc, char *argv[]) +{ + test_fetch(); + test_exchange(); + test_compare_exchange(); + test_the_stack(); + test_is_lock_free(); +} diff --git a/tests/ptr/pa_self_relative_ptr_atomic_pmem.cpp b/tests/ptr/pa_self_relative_ptr_atomic_pmem.cpp new file mode 100644 index 0000000000..b12b46cc66 --- /dev/null +++ b/tests/ptr/pa_self_relative_ptr_atomic_pmem.cpp @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* Copyright 2021, 4Paradigm Inc. 
*/ + +#include "thread_helpers.hpp" +#include "unittest.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define LAYOUT "cpp" + +namespace nvobj = pmem::obj; + +template +using pa_self_relative_ptr = pmem::obj::experimental::pa_self_relative_ptr; +template +using atomic_ptr = std::atomic>; + +constexpr int ARR_SIZE = 10000; + +struct root { + atomic_ptr[ARR_SIZE]> parr; + atomic_ptr ptr; +}; + +namespace +{ + +void +test_ptr_transactional(nvobj::pool &pop) +{ + auto r = pop.root(); + try { + nvobj::transaction::run(pop, [&] { + UT_ASSERT(r->ptr.load() == nullptr); + nvobj::transaction::snapshot>(&r->ptr); + r->ptr = nvobj::make_persistent(); + }); + } catch (...) { + UT_ASSERT(0); + } + + UT_ASSERT(r->ptr.load() != nullptr); + + try { + nvobj::transaction::run(pop, [&] { + nvobj::delete_persistent(r->ptr.load()); + nvobj::transaction::snapshot>(&r->ptr); + r->ptr.store(nullptr); + }); + } catch (...) { + UT_ASSERT(0); + } + + UT_ASSERT(r->ptr.load() == nullptr); +} + +} /* namespace */ + +static void +test(int argc, char *argv[]) +{ + if (argc != 2) + UT_FATAL("usage: %s file-name", argv[0]); + + const char *path = argv[1]; + + nvobj::pool pop; + + try { + pop = nvobj::pool::create(path, LAYOUT, PMEMOBJ_MIN_POOL, + S_IWUSR | S_IRUSR); + } catch (pmem::pool_error &pe) { + UT_FATAL("!pool::create: %s %s", pe.what(), path); + } + + test_ptr_transactional(pop); + + pop.close(); +} + +int +main(int argc, char *argv[]) +{ + return run_test([&] { test(argc, argv); }); +} From 4e5a08b960543479dfd24c457c8e3eea82d46b8c Mon Sep 17 00:00:00 2001 From: Yang Jun 01 Date: Fri, 23 Jul 2021 01:33:24 +0800 Subject: [PATCH 04/34] Clear format --- .../atomic_pa_self_relative_ptr.hpp | 98 +++++++---- .../experimental/pa_self_relative_ptr.hpp | 166 ++++++++++-------- .../libpmemobj++/experimental/swmr_map.hpp | 36 ++-- .../experimental/swmr_skip_list_impl.hpp | 145 ++++++++------- tests/ptr/pa_self_relative_ptr.cpp | 3 +- 
tests/ptr/pa_self_relative_ptr_atomic.hpp | 3 +- tests/swmr_map/swmr_map.cpp | 3 +- tests/swmr_map/swmr_map_singlethread.cpp | 6 +- 8 files changed, 263 insertions(+), 197 deletions(-) diff --git a/include/libpmemobj++/experimental/atomic_pa_self_relative_ptr.hpp b/include/libpmemobj++/experimental/atomic_pa_self_relative_ptr.hpp index e42bb2fb51..afc382a1b1 100644 --- a/include/libpmemobj++/experimental/atomic_pa_self_relative_ptr.hpp +++ b/include/libpmemobj++/experimental/atomic_pa_self_relative_ptr.hpp @@ -49,7 +49,8 @@ struct atomic> { { auto offset = accessor::pointer_to_offset(ptr, desired.get()); LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_BEFORE(order, &ptr); - accessor::get_offset(ptr).store((offset & desired.flush_set_mask()), order); + accessor::get_offset(ptr).store( + (offset & desired.flush_set_mask()), order); } value_type @@ -57,7 +58,8 @@ struct atomic> { { auto offset = accessor::get_offset(ptr).load(order); LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_AFTER(order, &ptr); - auto pointer = accessor::offset_to_pointer(offset | ~(value_type::flush_set_mask(offset)), ptr); + auto pointer = accessor::offset_to_pointer( + offset | ~(value_type::flush_set_mask(offset)), ptr); return value_type{pointer, value_type::flush_needed(offset)}; } @@ -67,30 +69,38 @@ struct atomic> { { auto new_offset = accessor::pointer_to_offset(ptr, desired.get()); - auto old_offset = - accessor::get_offset(ptr).exchange(new_offset & desired.flush_set_mask(), order); - return value_type{ - accessor::offset_to_pointer(old_offset | ~(value_type::flush_set_mask(old_offset)), ptr), value_type::flush_needed(old_offset)}; + auto old_offset = accessor::get_offset(ptr).exchange( + new_offset & desired.flush_set_mask(), order); + return value_type{accessor::offset_to_pointer( + old_offset | + ~(value_type::flush_set_mask( + old_offset)), + ptr), + value_type::flush_needed(old_offset)}; } bool - compare_exchange_weak(value_type &expected, - value_type desired, + compare_exchange_weak(value_type &expected, 
value_type desired, std::memory_order success, std::memory_order failure) noexcept { auto expected_offset = accessor::pointer_to_offset(ptr, expected.get()); - auto expected_actual = expected_offset & expected.flush_set_mask(); + auto expected_actual = + expected_offset & expected.flush_set_mask(); auto desired_offset = accessor::pointer_to_offset(ptr, desired.get()); auto desired_actual = desired_offset & desired.flush_set_mask(); bool result = accessor::get_offset(ptr).compare_exchange_weak( expected_actual, desired_actual, success, failure); if (!result) { - expected = value_type{accessor::offset_to_pointer( - expected_actual | ~(value_type::flush_set_mask(expected_actual)), ptr), - value_type::flush_needed(expected_actual)}; + expected = value_type{ + accessor::offset_to_pointer( + expected_actual | + ~(value_type::flush_set_mask( + expected_actual)), + ptr), + value_type::flush_needed(expected_actual)}; } return result; } @@ -102,16 +112,21 @@ struct atomic> { { auto expected_offset = accessor::pointer_to_offset(ptr, expected.get()); - auto expected_actual = expected_offset & expected.flush_set_mask(); + auto expected_actual = + expected_offset & expected.flush_set_mask(); auto desired_offset = accessor::pointer_to_offset(ptr, desired.get()); auto desired_actual = desired_offset & desired.flush_set_mask(); bool result = accessor::get_offset(ptr).compare_exchange_weak( expected_actual, desired_actual, order); if (!result) { - expected = value_type{accessor::offset_to_pointer( - expected_actual | ~(value_type::flush_set_mask(expected_actual)), ptr), - value_type::flush_needed(expected_actual)}; + expected = value_type{ + accessor::offset_to_pointer( + expected_actual | + ~(value_type::flush_set_mask( + expected_actual)), + ptr), + value_type::flush_needed(expected_actual)}; } return result; } @@ -123,16 +138,21 @@ struct atomic> { { auto expected_offset = accessor::pointer_to_offset(ptr, expected.get()); - auto expected_actual = expected_offset & 
expected.flush_set_mask(); + auto expected_actual = + expected_offset & expected.flush_set_mask(); auto desired_offset = accessor::pointer_to_offset(ptr, desired.get()); auto desired_actual = desired_offset & desired.flush_set_mask(); bool result = accessor::get_offset(ptr).compare_exchange_strong( expected_actual, desired_actual, success, failure); if (!result) { - expected = value_type{accessor::offset_to_pointer( - expected_actual | ~(value_type::flush_set_mask(expected_actual)), ptr), - value_type::flush_needed(expected_actual)}; + expected = value_type{ + accessor::offset_to_pointer( + expected_actual | + ~(value_type::flush_set_mask( + expected_actual)), + ptr), + value_type::flush_needed(expected_actual)}; } return result; } @@ -144,16 +164,21 @@ struct atomic> { { auto expected_offset = accessor::pointer_to_offset(ptr, expected.get()); - auto expected_actual = expected_offset & expected.flush_set_mask(); + auto expected_actual = + expected_offset & expected.flush_set_mask(); auto desired_offset = accessor::pointer_to_offset(ptr, desired.get()); auto desired_actual = desired_offset & desired.flush_set_mask(); bool result = accessor::get_offset(ptr).compare_exchange_strong( expected_actual, desired_actual, order); if (!result) { - expected = value_type{accessor::offset_to_pointer( - expected_actual | ~(value_type::flush_set_mask(expected_actual)), ptr), - value_type::flush_needed(expected_actual)}; + expected = value_type{ + accessor::offset_to_pointer( + expected_actual | + ~(value_type::flush_set_mask( + expected_actual)), + ptr), + value_type::flush_needed(expected_actual)}; } return result; } @@ -164,7 +189,11 @@ struct atomic> { { auto offset = accessor::get_offset(ptr).fetch_add( val * static_cast(sizeof(T)), order); - return value_type{accessor::offset_to_pointer(offset | ~(value_type::flush_set_mask(offset)), ptr), value_type::flush_needed(offset)}; + return value_type{ + accessor::offset_to_pointer( + offset | ~(value_type::flush_set_mask(offset)), + 
ptr), + value_type::flush_needed(offset)}; } value_type @@ -173,7 +202,11 @@ struct atomic> { { auto offset = accessor::get_offset(ptr).fetch_sub( val * static_cast(sizeof(T)), order); - return value_type{accessor::offset_to_pointer(offset | ~(value_type::flush_set_mask(offset)), ptr), value_type::flush_needed(offset)}; + return value_type{ + accessor::offset_to_pointer( + offset | ~(value_type::flush_set_mask(offset)), + ptr), + value_type::flush_needed(offset)}; } bool @@ -250,17 +283,20 @@ namespace detail { /** - * can_do_snapshot atomic specialization for pa_self_relative_ptr. Not thread safe. + * can_do_snapshot atomic specialization for pa_self_relative_ptr. Not thread + * safe. * * Use in a single threaded environment only. */ template -struct can_do_snapshot>> { +struct can_do_snapshot< + std::atomic>> { using snapshot_type = obj::experimental::pa_self_relative_ptr; static constexpr bool value = sizeof(std::atomic) == - sizeof(typename snapshot_type::offset_type); - static_assert(value, - "std::atomic should be the same size"); + sizeof(typename snapshot_type::offset_type); + static_assert( + value, + "std::atomic should be the same size"); }; } /* namespace detail */ diff --git a/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp b/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp index 7242cf533b..4f7710f8a1 100644 --- a/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp +++ b/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp @@ -9,7 +9,6 @@ #include #include - /* According to the definition of offset=real_offset-1, for 8-byte aligned * allocation, the lower 3 bits of the stored offset are always 1 (except * null_ptr). 
Therefore, the second lowest bit is used as the indicator of if @@ -20,7 +19,7 @@ #define kFlushNeeded ~(1L << 1) // flag &= kFlushNeeded, to indicate it needs flush -#define FlushNeeded(offset) (!((offset >> 1) & 1)) +#define FlushNeeded(offset) (!((offset >> 1) & 1)) // return true if needs explicit flush, false otherwise. namespace pmem @@ -43,20 +42,19 @@ class pa_self_relative_ptr : public self_relative_ptr_base { constexpr pa_self_relative_ptr() noexcept = default; constexpr pa_self_relative_ptr(std::nullptr_t) noexcept - : self_relative_ptr_base() + : self_relative_ptr_base() { } pa_self_relative_ptr(element_type *ptr) noexcept - : self_relative_ptr_base(self_offset(ptr)) + : self_relative_ptr_base(self_offset(ptr)) { } inline element_type * get() const noexcept { - return static_cast( - this->to_void_pointer()); + return static_cast(this->to_void_pointer()); } private: @@ -110,36 +108,45 @@ class pa_self_relative_ptr : public self_relative_ptr_base { * Nullptr constructor */ constexpr pa_self_relative_ptr(std::nullptr_t) noexcept - : self_relative_ptr_base() + : self_relative_ptr_base() { -// std::cerr << "pa_self_relative_ptr 1" << std::endl; + // std::cerr << "pa_self_relative_ptr 1" << + //std::endl; } /** * Volatile pointer constructor. * * @param ptr volatile pointer, pointing to persistent memory. 
*/ - pa_self_relative_ptr(element_type *ptr, bool flushNeeded = false) noexcept + pa_self_relative_ptr(element_type *ptr, + bool flushNeeded = false) noexcept : base_type(self_offset(ptr)) { -// std::cerr << "pa_self_relative_ptr 21: flushNeeded=" << flushNeeded << " : offset = " << std::hex << this->offset << std::endl; + // std::cerr << "pa_self_relative_ptr 21: + //flushNeeded=" << flushNeeded << " : offset = " << std::hex << + //this->offset << std::endl; intptr_t mask = (flushNeeded == true); --mask; this->offset &= (mask | kFlushNeeded); -// std::cerr << "pa_self_relative_ptr 22" << ": offset = " << std::hex << this->offset << std::endl; + // std::cerr << "pa_self_relative_ptr 22" << ": + //offset = " << std::hex << this->offset << std::endl; } /** * Constructor from persistent_ptr */ - pa_self_relative_ptr(persistent_ptr ptr, bool flushNeeded = false) noexcept + pa_self_relative_ptr(persistent_ptr ptr, + bool flushNeeded = false) noexcept : base_type(self_offset(ptr.get())) { -// std::cerr << "pa_self_relative_ptr 31: flushNeeded=" << flushNeeded << " : offset = " << std::hex << this->offset << std::endl; + // std::cerr << "pa_self_relative_ptr 31: + //flushNeeded=" << flushNeeded << " : offset = " << std::hex << + //this->offset << std::endl; intptr_t mask = (flushNeeded == true); --mask; this->offset &= (mask | kFlushNeeded); -// std::cerr << "pa_self_relative_ptr 32" << ": offset = " << std::hex << this->offset << std::endl; + // std::cerr << "pa_self_relative_ptr 32" << ": + //offset = " << std::hex << this->offset << std::endl; } /** @@ -153,11 +160,14 @@ class pa_self_relative_ptr : public self_relative_ptr_base { : base_type(self_offset( static_cast(pmemobj_direct(oid)))) { -// std::cerr << "pa_self_relative_ptr 41: flushNeeded=" << flushNeeded << " : offset = " << std::hex << this->offset << std::endl; + // std::cerr << "pa_self_relative_ptr 41: + //flushNeeded=" << flushNeeded << " : offset = " << std::hex << + //this->offset << std::endl; intptr_t 
mask = (flushNeeded == true); --mask; this->offset &= (mask | kFlushNeeded); -// std::cerr << "pa_self_relative_ptr 42" << ": offset = " << std::hex << this->offset << std::endl; + // std::cerr << "pa_self_relative_ptr 42" << ": + //offset = " << std::hex << this->offset << std::endl; } /** @@ -166,7 +176,8 @@ class pa_self_relative_ptr : public self_relative_ptr_base { pa_self_relative_ptr(const pa_self_relative_ptr &ptr) noexcept : base_type(ptr) { -// std::cerr << "pa_self_relative_ptr copy 1" << ": offset = " << std::hex << this->offset << std::endl; + // std::cerr << "pa_self_relative_ptr copy 1" << ": + //offset = " << std::hex << this->offset << std::endl; this->offset &= ptr.flush_set_mask(); } @@ -186,7 +197,8 @@ class pa_self_relative_ptr : public self_relative_ptr_base { pa_self_relative_ptr(pa_self_relative_ptr const &r) noexcept : base_type(self_offset(static_cast(r.get()))) { -// std::cerr << "pa_self_relative_ptr copy 2" << ": offset = " << std::hex << this->offset << std::endl; + // std::cerr << "pa_self_relative_ptr copy 2" << ": + //offset = " << std::hex << this->offset << std::endl; this->offset &= r.flush_set_mask(); } @@ -230,12 +242,16 @@ class pa_self_relative_ptr : public self_relative_ptr_base { void * to_void_pointer() const noexcept { -// std::cerr << "to_void_pointer 11: flushNeeded=" << flush_needed() << " : offset = " << std::hex << this->offset << std::endl; -// intptr_t mask = is_null() == true; -// --mask; -// std::cerr << "to_void_pointer 12: mask=" << std::hex << mask << std::endl; -// std::cerr << "to_void_pointer 13: ptr=" << std::hex << ((this->offset | ~kFlushNeeded) & mask) << std::endl; - return this->offset_to_pointer(this->offset);// | ~kFlushNeeded) & mask); + // std::cerr << "to_void_pointer 11: flushNeeded=" << + //flush_needed() << " : offset = " << std::hex << this->offset + //<< std::endl; intptr_t mask = is_null() == true; + // --mask; + // std::cerr << "to_void_pointer 12: mask=" << + //std::hex << mask << 
std::endl; std::cerr << "to_void_pointer + //13: ptr=" << std::hex << ((this->offset | ~kFlushNeeded) & + //mask) << std::endl; + return this->offset_to_pointer( + this->offset); // | ~kFlushNeeded) & mask); } /** @@ -295,7 +311,8 @@ class pa_self_relative_ptr : public self_relative_ptr_base { } static inline bool - flush_needed(offset_type offset) { + flush_needed(offset_type offset) + { return ((offset != nullptr_offset) && FlushNeeded(offset)); } @@ -303,7 +320,9 @@ class pa_self_relative_ptr : public self_relative_ptr_base { * return mask for caller to & in order to set the flush_needed flag. * can also be used to clear the flag using offset |= ~flush_set_mask(). */ - inline intptr_t flush_set_mask() const { + inline intptr_t + flush_set_mask() const + { intptr_t mask = flush_needed(); --mask; return (mask | kFlushNeeded); @@ -312,7 +331,8 @@ class pa_self_relative_ptr : public self_relative_ptr_base { * static version of flush_set_mask() for the given offset. */ static inline intptr_t - flush_set_mask(offset_type offset) { + flush_set_mask(offset_type offset) + { intptr_t mask = this_type::flush_needed(offset); --mask; return (mask | kFlushNeeded); @@ -442,8 +462,8 @@ class pa_self_relative_ptr : public self_relative_ptr_base { intptr_t mask = (this->flush_needed() == true); --mask; this->offset = (mask | kFlushNeeded) & - ((this->offset | ~kFlushNeeded) - + static_cast(sizeof(T))); + ((this->offset | ~kFlushNeeded) + + static_cast(sizeof(T))); return *this; } @@ -469,8 +489,8 @@ class pa_self_relative_ptr : public self_relative_ptr_base { intptr_t mask = (this->flush_needed() == true); --mask; this->offset = (mask | kFlushNeeded) & - ((this->offset | ~kFlushNeeded) - - static_cast(sizeof(T))); + ((this->offset | ~kFlushNeeded) - + static_cast(sizeof(T))); return *this; } @@ -496,8 +516,8 @@ class pa_self_relative_ptr : public self_relative_ptr_base { intptr_t mask = (this->flush_needed() == true); --mask; this->offset = (mask | kFlushNeeded) & - 
((this->offset | ~kFlushNeeded) - + s * static_cast(sizeof(T))); + ((this->offset | ~kFlushNeeded) + + s * static_cast(sizeof(T))); return *this; } @@ -511,8 +531,8 @@ class pa_self_relative_ptr : public self_relative_ptr_base { intptr_t mask = (this->flush_needed() == true); --mask; this->offset = (mask | kFlushNeeded) & - ((this->offset | ~kFlushNeeded) - - s * static_cast(sizeof(T))); + ((this->offset | ~kFlushNeeded) - + s * static_cast(sizeof(T))); return *this; } @@ -534,34 +554,37 @@ class pa_self_relative_ptr : public self_relative_ptr_base { { intptr_t mask = other_offset == nullptr_offset; --mask; - return base_type::offset_to_pointer((other_offset | ~kFlushNeeded) & mask); - } - -// /** -// * Conversion self_relative_ptr_base to offset from itself -// */ -// difference_type -// pointer_to_offset(const pa_self_relative_ptr &ptr) const noexcept -// { -// /* -// This version without branches is vectorization-friendly. -// mask = is_null() should not create a branch in the code. -// In this line, we just assign 0 or 1 to the mask variable. -// -// This code is equal: -// return ptr.is_null() -// ? nullptr_offset -// : ptr.offset + this->distance_between_self(ptr); -// */ -// uintptr_t mask = ptr.is_null(); -// --mask; -// difference_type distance_between_self = -// reinterpret_cast(&ptr) - -// reinterpret_cast(this); -// distance_between_self &= -// reinterpret_cast(mask); -// return ptr.offset + distance_between_self; -// } + return base_type::offset_to_pointer( + (other_offset | ~kFlushNeeded) & mask); + } + + // /** + // * Conversion self_relative_ptr_base to offset from itself + // */ + // difference_type + // pointer_to_offset(const pa_self_relative_ptr &ptr) const + //noexcept + // { + // /* + // This version without branches is vectorization-friendly. + // mask = is_null() should not create a branch in the code. + // In this line, we just assign 0 or 1 to the mask + //variable. + // + // This code is equal: + // return ptr.is_null() + // ? 
nullptr_offset + // : ptr.offset + this->distance_between_self(ptr); + // */ + // uintptr_t mask = ptr.is_null(); + // --mask; + // difference_type distance_between_self = + // reinterpret_cast(&ptr) - + // reinterpret_cast(this); + // distance_between_self &= + // reinterpret_cast(mask); + // return ptr.offset + distance_between_self; + // } private: static constexpr difference_type nullptr_offset = 0; @@ -577,19 +600,21 @@ class pa_self_relative_ptr : public self_relative_ptr_base { ///** // * Static class accessor to self_relative_ptr_base // */ -//template -//class pa_self_relative_accessor { -//public: +// template +// class pa_self_relative_accessor { +// public: // using access_type = pmem::detail::self_relative_ptr_base_impl

; // using difference_type = typename access_type::difference_type; // // template // static difference_type -// pointer_to_offset(const access_type &obj, PointerType *ptr, bool flush_needed) +// pointer_to_offset(const access_type &obj, PointerType *ptr, bool +//flush_needed) // { // intptr_t mask = (flush_needed == true); // --mask; -// return ((mask | kFlushNeeded) & obj.pointer_to_offset(static_cast(ptr))); +// return ((mask | kFlushNeeded) & +//obj.pointer_to_offset(static_cast(ptr))); // } // // template @@ -598,8 +623,8 @@ class pa_self_relative_ptr : public self_relative_ptr_base { // { // intptr_t mask = (offset == access_type::nullptr_offset); // --mask; -// auto ptr = obj.offset_to_pointer((offset | ~kFlushNeeded) & mask); -// return static_cast(ptr); +// auto ptr = obj.offset_to_pointer((offset | ~kFlushNeeded) & +//mask); return static_cast(ptr); // } // // static P & @@ -885,5 +910,4 @@ operator<<(std::ostream &os, pa_self_relative_ptr const &ptr) } } - #endif // LIBPMEMOBJ_CPP_PA_SELF_RELATIVE_PTR_HPP diff --git a/include/libpmemobj++/experimental/swmr_map.hpp b/include/libpmemobj++/experimental/swmr_map.hpp index 9fa20f87c4..519187bdf1 100644 --- a/include/libpmemobj++/experimental/swmr_map.hpp +++ b/include/libpmemobj++/experimental/swmr_map.hpp @@ -5,8 +5,8 @@ #define PMEMOBJ_SWMR_MAP_HPP #include -#include #include +#include namespace pmem { @@ -18,15 +18,14 @@ namespace experimental * */ template , - typename Allocator = - pmem::obj::allocator>> -class swmr_map - : public detail::swmr_skip_list> { + typename Allocator = + pmem::obj::allocator>> +class swmr_map : public detail::swmr_skip_list> { using traits_type = detail::map_traits; + detail::default_random_generator, + Allocator, false, 64>; using base_type = pmem::detail::swmr_skip_list; public: @@ -67,8 +66,8 @@ class swmr_map * Construct the empty map */ explicit swmr_map(const key_compare &comp, - const allocator_type &alloc = allocator_type()) - : base_type(comp, alloc) + const 
allocator_type &alloc = allocator_type()) + : base_type(comp, alloc) { } @@ -76,10 +75,9 @@ class swmr_map * Constructs the map with the contents of the range [first, last). */ template - swmr_map(InputIt first, InputIt last, - const key_compare &comp = Comp(), - const allocator_type &alloc = allocator_type()) - : base_type(first, last, comp, alloc) + swmr_map(InputIt first, InputIt last, const key_compare &comp = Comp(), + const allocator_type &alloc = allocator_type()) + : base_type(first, last, comp, alloc) { } @@ -87,7 +85,7 @@ class swmr_map * Constructs the map with initializer list */ swmr_map(std::initializer_list ilist) - : base_type(ilist.begin(), ilist.end()) + : base_type(ilist.begin(), ilist.end()) { } @@ -97,8 +95,7 @@ class swmr_map swmr_map & operator=(const swmr_map &other) { - return static_cast( - base_type::operator=(other)); + return static_cast(base_type::operator=(other)); } /** @@ -117,8 +114,7 @@ class swmr_map swmr_map & operator=(std::initializer_list ilist) { - return static_cast( - base_type::operator=(ilist)); + return static_cast(base_type::operator=(ilist)); } }; diff --git a/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp b/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp index f9dd0e5908..df1f3cda88 100644 --- a/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp +++ b/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp @@ -23,9 +23,9 @@ #include #include +#include #include #include -#include namespace pmem { @@ -119,7 +119,8 @@ class swmr_skip_list_node { next(size_type level) { assert(level < height()); - node_pointer current = get_next(level).load(std::memory_order_acquire); + node_pointer current = + get_next(level).load(std::memory_order_acquire); if (!current.flush_needed()) { return current; } @@ -127,13 +128,14 @@ class swmr_skip_list_node { node_pointer desired; while (true) { desired = node_pointer{current.get(), false}; - if (get_next(level).compare_exchange_weak( - current, desired)) { + 
if (get_next(level).compare_exchange_weak(current, + desired)) { auto &node = get_next(level); pop.persist(&node, sizeof(node)); break; } - current = get_next(level).load(std::memory_order_acquire); + current = + get_next(level).load(std::memory_order_acquire); if (!current.flush_needed()) { return current; } @@ -152,33 +154,34 @@ class swmr_skip_list_node { * Can`t be called concurrently * Should be called inside a transaction */ -// void -// set_next_tx(size_type level, node_pointer next) -// { -// assert(level < height()); -// assert(pmemobj_tx_stage() == TX_STAGE_WORK); -// auto &node = get_next(level); -// obj::flat_transaction::snapshot(&node); -// node.store(next, std::memory_order_release); -// } -// -// void -// set_next(obj::pool_base pop, size_type level, node_pointer next) -// { -// assert(level < height()); -// auto &node = get_next(level); -// node.store(next, std::memory_order_release); -// pop.persist(&node, sizeof(node)); -// } + // void + // set_next_tx(size_type level, node_pointer next) + // { + // assert(level < height()); + // assert(pmemobj_tx_stage() == TX_STAGE_WORK); + // auto &node = get_next(level); + // obj::flat_transaction::snapshot(&node); + // node.store(next, std::memory_order_release); + // } + // + // void + // set_next(obj::pool_base pop, size_type level, node_pointer next) + // { + // assert(level < height()); + // auto &node = get_next(level); + // node.store(next, std::memory_order_release); + // pop.persist(&node, sizeof(node)); + // } void set_next(size_type level, node_pointer next) { assert(level < height()); -// assert(pmemobj_tx_stage() == TX_STAGE_WORK); + // assert(pmemobj_tx_stage() == TX_STAGE_WORK); auto &node = get_next(level); -// obj::flat_transaction::snapshot(&node); - node.store(node_pointer{next.get(), true}, std::memory_order_release); + // obj::flat_transaction::snapshot(&node); + node.store(node_pointer{next.get(), true}, + std::memory_order_release); } void @@ -188,19 +191,20 @@ class swmr_skip_list_node 
{ auto *nexts = get_nexts(); for (size_type i = 0; i < h; i++) { - nexts[i].store(node_pointer{new_nexts[i].get(), true}, std::memory_order_relaxed); + nexts[i].store(node_pointer{new_nexts[i].get(), true}, + std::memory_order_relaxed); } } -// void -// set_nexts(obj::pool_base pop, const node_pointer *new_nexts, -// size_type h) -// { -// set_nexts(new_nexts, h); -// -// auto *nexts = get_nexts(); -// pop.persist(nexts, sizeof(nexts[0]) * h); -// } + // void + // set_nexts(obj::pool_base pop, const node_pointer *new_nexts, + // size_type h) + // { + // set_nexts(new_nexts, h); + // + // auto *nexts = get_nexts(); + // pop.persist(nexts, sizeof(nexts[0]) * h); + // } /** @return number of layers */ size_type @@ -273,24 +277,28 @@ class swmr_skip_list_iterator { } /** Copy constructor. */ - swmr_skip_list_iterator(const swmr_skip_list_iterator &other) : node(other.node) + swmr_skip_list_iterator(const swmr_skip_list_iterator &other) + : node(other.node) { } /** Copy constructor for const iterator from non-const iterator */ template ::type> - swmr_skip_list_iterator(const swmr_skip_list_iterator &other) + swmr_skip_list_iterator( + const swmr_skip_list_iterator &other) : node(other.node) { } - reference operator*() const + reference + operator*() const { return *(node->get()); } - pointer operator->() const + pointer + operator->() const { return node->get(); } @@ -462,9 +470,8 @@ class swmr_skip_list { * @throw pmem::transaction_alloc_error when allocating memory for * inserted elements in transaction failed. 
*/ - explicit swmr_skip_list( - const key_compare &comp, - const allocator_type &alloc = allocator_type()) + explicit swmr_skip_list(const key_compare &comp, + const allocator_type &alloc = allocator_type()) : _node_allocator(alloc), _compare(comp) { check_tx_stage_work(); @@ -496,8 +503,8 @@ class swmr_skip_list { */ template swmr_skip_list(InputIt first, InputIt last, - const key_compare &comp = key_compare(), - const allocator_type &alloc = allocator_type()) + const key_compare &comp = key_compare(), + const allocator_type &alloc = allocator_type()) : _node_allocator(alloc), _compare(comp) { check_tx_stage_work(); @@ -555,8 +562,7 @@ class swmr_skip_list { * transaction. * @throw rethrows element constructor exception. */ - swmr_skip_list(const swmr_skip_list &other, - const allocator_type &alloc) + swmr_skip_list(const swmr_skip_list &other, const allocator_type &alloc) : _node_allocator(alloc), _compare(other._compare), _rnd_generator(other._rnd_generator) @@ -614,8 +620,7 @@ class swmr_skip_list { * transaction. * @throw rethrows element constructor exception. */ - swmr_skip_list(swmr_skip_list &&other, - const allocator_type &alloc) + swmr_skip_list(swmr_skip_list &&other, const allocator_type &alloc) : _node_allocator(alloc), _compare(other._compare), _rnd_generator(other._rnd_generator) @@ -992,7 +997,7 @@ class swmr_skip_list { */ template std::pair - emplace(Args &&... args) + emplace(Args &&...args) { return internal_emplace(std::forward(args)...); } @@ -1029,7 +1034,7 @@ class swmr_skip_list { */ template iterator - emplace_hint(const_iterator hint, Args &&... args) + emplace_hint(const_iterator hint, Args &&...args) { /* Ignore hint */ return emplace(std::forward(args)...).first; @@ -1060,7 +1065,7 @@ class swmr_skip_list { */ template std::pair - try_emplace(const key_type &k, Args &&... 
args) + try_emplace(const key_type &k, Args &&...args) { return internal_try_emplace(k, std::forward(args)...); } @@ -1090,7 +1095,7 @@ class swmr_skip_list { */ template std::pair - try_emplace(key_type &&k, Args &&... args) + try_emplace(key_type &&k, Args &&...args) { return internal_try_emplace(std::move(k), std::forward(args)...); @@ -1127,7 +1132,7 @@ class swmr_skip_list { has_is_transparent::value && std::is_constructible::value, std::pair>::type - try_emplace(K &&k, Args &&... args) + try_emplace(K &&k, Args &&...args) { return internal_try_emplace(std::forward(k), std::forward(args)...); @@ -1903,7 +1908,8 @@ class swmr_skip_list { node_ptr head = dummy_head.get(); for (size_type i = 0; i < head->height(); ++i) { -// head->set_next_tx(i, nullptr); + // head->set_next_tx(i, + //nullptr); head->set_next(i, nullptr); } @@ -2404,7 +2410,7 @@ class swmr_skip_list { template std::pair - internal_try_emplace(K &&key, Args &&... args) + internal_try_emplace(K &&key, Args &&...args) { return internal_insert( key, std::piecewise_construct, @@ -2414,7 +2420,7 @@ class swmr_skip_list { template std::pair - internal_emplace(Args &&... args) + internal_emplace(Args &&...args) { check_outside_tx(); tls_entry_type &tls_entry = tls_data.local(); @@ -2438,8 +2444,9 @@ class swmr_skip_list { assert(tls_entry.insert_stage == not_started); assert(tls_entry.ptr != nullptr); -// n->set_nexts(pop, next_nodes.data(), height); - n->set_nexts(next_nodes.data(), height); + // n->set_nexts(pop, + //next_nodes.data(), height); + n->set_nexts(next_nodes.data(), height); tls_entry.insert_stage = in_progress; pop.persist(&(tls_entry.insert_stage), @@ -2469,7 +2476,7 @@ class swmr_skip_list { */ template std::pair - internal_unsafe_emplace(Args &&... args) + internal_unsafe_emplace(Args &&...args) { check_tx_stage_work(); @@ -2506,7 +2513,7 @@ class swmr_skip_list { */ template std::pair - internal_insert(const K &key, Args &&... 
args) + internal_insert(const K &key, Args &&...args) { check_outside_tx(); tls_entry_type &tls_entry = tls_data.local(); @@ -2620,7 +2627,8 @@ class swmr_skip_list { next_nodes[level]); assert(prev_nodes[level]->next(level) == n->next(level)); -// prev_nodes[level]->set_next(pop, level, new_node); + // prev_nodes[level]->set_next(pop, + //level, new_node); prev_nodes[level]->set_next(level, new_node); } @@ -2834,10 +2842,10 @@ class swmr_skip_list { ++level) { assert(prev_nodes[level]->height() > level); assert(next_nodes[level].get() == erase_node); -// prev_nodes[level]->set_next_tx(level, -// erase_node->next(level)); + // prev_nodes[level]->set_next_tx(level, + // erase_node->next(level)); prev_nodes[level]->set_next(level, - erase_node->next(level)); + erase_node->next(level)); } return std::pair( @@ -2913,7 +2921,7 @@ class swmr_skip_list { /** Creates new node */ template persistent_node_ptr - create_node(Args &&... args) + create_node(Args &&...args) { size_type levels = random_level(); @@ -2983,7 +2991,7 @@ class swmr_skip_list { */ template persistent_node_ptr - creates_dummy_node(size_type height, Args &&... 
args) + creates_dummy_node(size_type height, Args &&...args) { assert(pmemobj_tx_stage() == TX_STAGE_WORK); size_type sz = calc_node_size(height); @@ -3125,7 +3133,8 @@ class swmr_skip_list { /* Otherwise, node already linked on * this layer */ assert(n->next(level) == next_nodes[level]); -// prev_nodes[level]->set_next(pop, level, node); + // prev_nodes[level]->set_next(pop, + //level, node); prev_nodes[level]->set_next(level, node); } } diff --git a/tests/ptr/pa_self_relative_ptr.cpp b/tests/ptr/pa_self_relative_ptr.cpp index 4cd0e39b67..1efe91bbf0 100644 --- a/tests/ptr/pa_self_relative_ptr.cpp +++ b/tests/ptr/pa_self_relative_ptr.cpp @@ -53,7 +53,8 @@ test_offset(nvobj::pool &pop) auto distance = self_relative_ptr_base::distance_between; - pa_self_relative_ptr cptr = nvobj::make_persistent(); + pa_self_relative_ptr cptr = + nvobj::make_persistent(); pa_self_relative_ptr bptr = cptr; UT_ASSERT(distance(cptr, bptr) > 0); UT_ASSERT(static_cast( diff --git a/tests/ptr/pa_self_relative_ptr_atomic.hpp b/tests/ptr/pa_self_relative_ptr_atomic.hpp index 229f15d339..7c75a16e21 100644 --- a/tests/ptr/pa_self_relative_ptr_atomic.hpp +++ b/tests/ptr/pa_self_relative_ptr_atomic.hpp @@ -135,7 +135,8 @@ void test_exchange() { pa_self_relative_ptr first = reinterpret_cast(uintptr_t{0}); - pa_self_relative_ptr second = reinterpret_cast(~uintptr_t{0}); + pa_self_relative_ptr second = + reinterpret_cast(~uintptr_t{0}); atomic_type ptr; diff --git a/tests/swmr_map/swmr_map.cpp b/tests/swmr_map/swmr_map.cpp index ca5bea5dfe..ca0633fb26 100644 --- a/tests/swmr_map/swmr_map.cpp +++ b/tests/swmr_map/swmr_map.cpp @@ -41,8 +41,7 @@ struct hetero_less { typedef nvobj::experimental::swmr_map, nvobj::p> persistent_map_type_int; -typedef nvobj::experimental::swmr_map +typedef nvobj::experimental::swmr_map persistent_map_type_string; struct root { diff --git a/tests/swmr_map/swmr_map_singlethread.cpp b/tests/swmr_map/swmr_map_singlethread.cpp index 40eb4f6286..30510577ec 100644 --- 
a/tests/swmr_map/swmr_map_singlethread.cpp +++ b/tests/swmr_map/swmr_map_singlethread.cpp @@ -80,8 +80,8 @@ gen_hetero(int v) return std::to_string(v); } -typedef nvobj::experimental::swmr_map +typedef nvobj::experimental::swmr_map persistent_map_string_type; struct root { @@ -110,7 +110,7 @@ verify_elements(persistent_map_type &map, size_t elements) template void tx_alloc_wrapper(nvobj::pool_base &pop, nvobj::persistent_ptr &ptr, - Args &&... args) + Args &&...args) { try { nvobj::transaction::manual tx(pop); From def1592b20a9fb8aabd42531accd0abc9e92d9bd Mon Sep 17 00:00:00 2001 From: Yang Jun 01 Date: Fri, 23 Jul 2021 01:41:35 +0800 Subject: [PATCH 05/34] Clear format --- .../experimental/pa_self_relative_ptr.hpp | 53 +------------------ 1 file changed, 1 insertion(+), 52 deletions(-) diff --git a/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp b/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp index 4f7710f8a1..022d4f0a5f 100644 --- a/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp +++ b/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp @@ -242,16 +242,8 @@ class pa_self_relative_ptr : public self_relative_ptr_base { void * to_void_pointer() const noexcept { - // std::cerr << "to_void_pointer 11: flushNeeded=" << - //flush_needed() << " : offset = " << std::hex << this->offset - //<< std::endl; intptr_t mask = is_null() == true; - // --mask; - // std::cerr << "to_void_pointer 12: mask=" << - //std::hex << mask << std::endl; std::cerr << "to_void_pointer - //13: ptr=" << std::hex << ((this->offset | ~kFlushNeeded) & - //mask) << std::endl; return this->offset_to_pointer( - this->offset); // | ~kFlushNeeded) & mask); + this->offset); } /** @@ -596,49 +588,6 @@ class pa_self_relative_ptr : public self_relative_ptr_base { template friend class pa_self_relative_accessor; }; -// -///** -// * Static class accessor to self_relative_ptr_base -// */ -// template -// class pa_self_relative_accessor { -// public: -// using 
access_type = pmem::detail::self_relative_ptr_base_impl

; -// using difference_type = typename access_type::difference_type; -// -// template -// static difference_type -// pointer_to_offset(const access_type &obj, PointerType *ptr, bool -//flush_needed) -// { -// intptr_t mask = (flush_needed == true); -// --mask; -// return ((mask | kFlushNeeded) & -//obj.pointer_to_offset(static_cast(ptr))); -// } -// -// template -// static PointerType * -// offset_to_pointer(difference_type offset, const access_type &obj) -// { -// intptr_t mask = (offset == access_type::nullptr_offset); -// --mask; -// auto ptr = obj.offset_to_pointer((offset | ~kFlushNeeded) & -//mask); return static_cast(ptr); -// } -// -// static P & -// get_offset(access_type &ptr) -// { -// return ptr.offset; -// } -// -// static const P & -// get_offset(const access_type &ptr) -// { -// return ptr.offset; -// } -//}; /** * Swaps two pa_self_relative_ptr objects of the same type. From 0ea98e79207b2f2e150156e97c04398a30671c49 Mon Sep 17 00:00:00 2001 From: Yang Jun 01 Date: Fri, 23 Jul 2021 01:49:19 +0800 Subject: [PATCH 06/34] Clear format --- .../experimental/pa_self_relative_ptr.hpp | 49 ------------- .../experimental/swmr_skip_list_impl.hpp | 73 ++++++++----------- 2 files changed, 32 insertions(+), 90 deletions(-) diff --git a/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp b/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp index 022d4f0a5f..4848649d50 100644 --- a/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp +++ b/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp @@ -110,8 +110,6 @@ class pa_self_relative_ptr : public self_relative_ptr_base { constexpr pa_self_relative_ptr(std::nullptr_t) noexcept : self_relative_ptr_base() { - // std::cerr << "pa_self_relative_ptr 1" << - //std::endl; } /** * Volatile pointer constructor. 
@@ -122,14 +120,9 @@ class pa_self_relative_ptr : public self_relative_ptr_base { bool flushNeeded = false) noexcept : base_type(self_offset(ptr)) { - // std::cerr << "pa_self_relative_ptr 21: - //flushNeeded=" << flushNeeded << " : offset = " << std::hex << - //this->offset << std::endl; intptr_t mask = (flushNeeded == true); --mask; this->offset &= (mask | kFlushNeeded); - // std::cerr << "pa_self_relative_ptr 22" << ": - //offset = " << std::hex << this->offset << std::endl; } /** @@ -139,14 +132,9 @@ class pa_self_relative_ptr : public self_relative_ptr_base { bool flushNeeded = false) noexcept : base_type(self_offset(ptr.get())) { - // std::cerr << "pa_self_relative_ptr 31: - //flushNeeded=" << flushNeeded << " : offset = " << std::hex << - //this->offset << std::endl; intptr_t mask = (flushNeeded == true); --mask; this->offset &= (mask | kFlushNeeded); - // std::cerr << "pa_self_relative_ptr 32" << ": - //offset = " << std::hex << this->offset << std::endl; } /** @@ -160,14 +148,9 @@ class pa_self_relative_ptr : public self_relative_ptr_base { : base_type(self_offset( static_cast(pmemobj_direct(oid)))) { - // std::cerr << "pa_self_relative_ptr 41: - //flushNeeded=" << flushNeeded << " : offset = " << std::hex << - //this->offset << std::endl; intptr_t mask = (flushNeeded == true); --mask; this->offset &= (mask | kFlushNeeded); - // std::cerr << "pa_self_relative_ptr 42" << ": - //offset = " << std::hex << this->offset << std::endl; } /** @@ -176,8 +159,6 @@ class pa_self_relative_ptr : public self_relative_ptr_base { pa_self_relative_ptr(const pa_self_relative_ptr &ptr) noexcept : base_type(ptr) { - // std::cerr << "pa_self_relative_ptr copy 1" << ": - //offset = " << std::hex << this->offset << std::endl; this->offset &= ptr.flush_set_mask(); } @@ -197,8 +178,6 @@ class pa_self_relative_ptr : public self_relative_ptr_base { pa_self_relative_ptr(pa_self_relative_ptr const &r) noexcept : base_type(self_offset(static_cast(r.get()))) { - // std::cerr << 
"pa_self_relative_ptr copy 2" << ": - //offset = " << std::hex << this->offset << std::endl; this->offset &= r.flush_set_mask(); } @@ -550,34 +529,6 @@ class pa_self_relative_ptr : public self_relative_ptr_base { (other_offset | ~kFlushNeeded) & mask); } - // /** - // * Conversion self_relative_ptr_base to offset from itself - // */ - // difference_type - // pointer_to_offset(const pa_self_relative_ptr &ptr) const - //noexcept - // { - // /* - // This version without branches is vectorization-friendly. - // mask = is_null() should not create a branch in the code. - // In this line, we just assign 0 or 1 to the mask - //variable. - // - // This code is equal: - // return ptr.is_null() - // ? nullptr_offset - // : ptr.offset + this->distance_between_self(ptr); - // */ - // uintptr_t mask = ptr.is_null(); - // --mask; - // difference_type distance_between_self = - // reinterpret_cast(&ptr) - - // reinterpret_cast(this); - // distance_between_self &= - // reinterpret_cast(mask); - // return ptr.offset + distance_between_self; - // } - private: static constexpr difference_type nullptr_offset = 0; difference_type diff --git a/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp b/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp index df1f3cda88..a4e0f30638 100644 --- a/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp +++ b/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp @@ -154,32 +154,34 @@ class swmr_skip_list_node { * Can`t be called concurrently * Should be called inside a transaction */ - // void - // set_next_tx(size_type level, node_pointer next) - // { - // assert(level < height()); - // assert(pmemobj_tx_stage() == TX_STAGE_WORK); - // auto &node = get_next(level); - // obj::flat_transaction::snapshot(&node); - // node.store(next, std::memory_order_release); - // } - // - // void - // set_next(obj::pool_base pop, size_type level, node_pointer next) - // { - // assert(level < height()); - // auto &node = get_next(level); - // 
node.store(next, std::memory_order_release); - // pop.persist(&node, sizeof(node)); - // } + /* + void + set_next_tx(size_type level, node_pointer next) + { + assert(level < height()); + assert(pmemobj_tx_stage() == TX_STAGE_WORK); + auto &node = get_next(level); + obj::flat_transaction::snapshot(&node); + node.store(next, std::memory_order_release); + } + + void + set_next(obj::pool_base pop, size_type level, node_pointer next) + { + assert(level < height()); + auto &node = get_next(level); + node.store(next, std::memory_order_release); + pop.persist(&node, sizeof(node)); + }*/ void set_next(size_type level, node_pointer next) { assert(level < height()); - // assert(pmemobj_tx_stage() == TX_STAGE_WORK); + /* assert(pmemobj_tx_stage() == TX_STAGE_WORK); */ auto &node = get_next(level); - // obj::flat_transaction::snapshot(&node); + /* obj::flat_transaction::snapshot + * (&node); */ node.store(node_pointer{next.get(), true}, std::memory_order_release); } @@ -195,17 +197,17 @@ class swmr_skip_list_node { std::memory_order_relaxed); } } + /* + void + set_nexts(obj::pool_base pop, const node_pointer *new_nexts, + size_type h) + { + set_nexts(new_nexts, h); - // void - // set_nexts(obj::pool_base pop, const node_pointer *new_nexts, - // size_type h) - // { - // set_nexts(new_nexts, h); - // - // auto *nexts = get_nexts(); - // pop.persist(nexts, sizeof(nexts[0]) * h); - // } - + auto *nexts = get_nexts(); + pop.persist(nexts, sizeof(nexts[0]) * h); + } + */ /** @return number of layers */ size_type height() const @@ -1908,8 +1910,6 @@ class swmr_skip_list { node_ptr head = dummy_head.get(); for (size_type i = 0; i < head->height(); ++i) { - // head->set_next_tx(i, - //nullptr); head->set_next(i, nullptr); } @@ -2443,9 +2443,6 @@ class swmr_skip_list { -> persistent_node_ptr & { assert(tls_entry.insert_stage == not_started); assert(tls_entry.ptr != nullptr); - - // n->set_nexts(pop, - //next_nodes.data(), height); n->set_nexts(next_nodes.data(), height); 
tls_entry.insert_stage = in_progress; @@ -2627,8 +2624,6 @@ class swmr_skip_list { next_nodes[level]); assert(prev_nodes[level]->next(level) == n->next(level)); - // prev_nodes[level]->set_next(pop, - //level, new_node); prev_nodes[level]->set_next(level, new_node); } @@ -2842,8 +2837,6 @@ class swmr_skip_list { ++level) { assert(prev_nodes[level]->height() > level); assert(next_nodes[level].get() == erase_node); - // prev_nodes[level]->set_next_tx(level, - // erase_node->next(level)); prev_nodes[level]->set_next(level, erase_node->next(level)); } @@ -3133,8 +3126,6 @@ class swmr_skip_list { /* Otherwise, node already linked on * this layer */ assert(n->next(level) == next_nodes[level]); - // prev_nodes[level]->set_next(pop, - //level, node); prev_nodes[level]->set_next(level, node); } } From ec1f1cc78ad628e5c26ec84774c88e07767269fe Mon Sep 17 00:00:00 2001 From: Yang Jun 01 Date: Fri, 23 Jul 2021 01:53:55 +0800 Subject: [PATCH 07/34] Clear format --- .../experimental/pa_self_relative_ptr.hpp | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp b/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp index 4848649d50..af12dff3dc 100644 --- a/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp +++ b/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp @@ -221,15 +221,13 @@ class pa_self_relative_ptr : public self_relative_ptr_base { void * to_void_pointer() const noexcept { - return this->offset_to_pointer( - this->offset); + return this->offset_to_pointer(this->offset); } /** * Explicit conversion operator to void* */ - explicit - operator void *() const noexcept + explicit operator void *() const noexcept { return to_void_pointer(); } @@ -340,8 +338,8 @@ class pa_self_relative_ptr : public self_relative_ptr_base { /** * Dereference operator. 
*/ - typename pmem::detail::sp_dereference::type - operator*() const noexcept + typename pmem::detail::sp_dereference::type operator*() const + noexcept { return *(this->get()); } @@ -349,8 +347,8 @@ class pa_self_relative_ptr : public self_relative_ptr_base { /** * Member access operator. */ - typename pmem::detail::sp_member_access::type - operator->() const noexcept + typename pmem::detail::sp_member_access::type operator->() const + noexcept { return this->get(); } From 51ee8aa8fcb4987981bf0bd3087bffa430c5fd50 Mon Sep 17 00:00:00 2001 From: Yang Jun 01 Date: Fri, 23 Jul 2021 09:24:05 +0800 Subject: [PATCH 08/34] Clear format --- .../experimental/swmr_skip_list_impl.hpp | 28 +++++++++---------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp b/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp index a4e0f30638..2d4cee0de9 100644 --- a/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp +++ b/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp @@ -293,14 +293,12 @@ class swmr_skip_list_iterator { { } - reference - operator*() const + reference operator*() const { return *(node->get()); } - pointer - operator->() const + pointer operator->() const { return node->get(); } @@ -999,7 +997,7 @@ class swmr_skip_list { */ template std::pair - emplace(Args &&...args) + emplace(Args &&... args) { return internal_emplace(std::forward(args)...); } @@ -1036,7 +1034,7 @@ class swmr_skip_list { */ template iterator - emplace_hint(const_iterator hint, Args &&...args) + emplace_hint(const_iterator hint, Args &&... args) { /* Ignore hint */ return emplace(std::forward(args)...).first; @@ -1067,7 +1065,7 @@ class swmr_skip_list { */ template std::pair - try_emplace(const key_type &k, Args &&...args) + try_emplace(const key_type &k, Args &&... 
args) { return internal_try_emplace(k, std::forward(args)...); } @@ -1097,7 +1095,7 @@ class swmr_skip_list { */ template std::pair - try_emplace(key_type &&k, Args &&...args) + try_emplace(key_type &&k, Args &&... args) { return internal_try_emplace(std::move(k), std::forward(args)...); @@ -1134,7 +1132,7 @@ class swmr_skip_list { has_is_transparent::value && std::is_constructible::value, std::pair>::type - try_emplace(K &&k, Args &&...args) + try_emplace(K &&k, Args &&... args) { return internal_try_emplace(std::forward(k), std::forward(args)...); @@ -2410,7 +2408,7 @@ class swmr_skip_list { template std::pair - internal_try_emplace(K &&key, Args &&...args) + internal_try_emplace(K &&key, Args &&... args) { return internal_insert( key, std::piecewise_construct, @@ -2420,7 +2418,7 @@ class swmr_skip_list { template std::pair - internal_emplace(Args &&...args) + internal_emplace(Args &&... args) { check_outside_tx(); tls_entry_type &tls_entry = tls_data.local(); @@ -2473,7 +2471,7 @@ class swmr_skip_list { */ template std::pair - internal_unsafe_emplace(Args &&...args) + internal_unsafe_emplace(Args &&... args) { check_tx_stage_work(); @@ -2510,7 +2508,7 @@ class swmr_skip_list { */ template std::pair - internal_insert(const K &key, Args &&...args) + internal_insert(const K &key, Args &&... args) { check_outside_tx(); tls_entry_type &tls_entry = tls_data.local(); @@ -2914,7 +2912,7 @@ class swmr_skip_list { /** Creates new node */ template persistent_node_ptr - create_node(Args &&...args) + create_node(Args &&... args) { size_type levels = random_level(); @@ -2984,7 +2982,7 @@ class swmr_skip_list { */ template persistent_node_ptr - creates_dummy_node(size_type height, Args &&...args) + creates_dummy_node(size_type height, Args &&... 
args) { assert(pmemobj_tx_stage() == TX_STAGE_WORK); size_type sz = calc_node_size(height); From 6af3575765d939c94ee20be7764908bdce6fad2b Mon Sep 17 00:00:00 2001 From: Yang Jun 01 Date: Fri, 23 Jul 2021 09:47:21 +0800 Subject: [PATCH 09/34] Clear format --- tests/swmr_map/swmr_map_singlethread.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/swmr_map/swmr_map_singlethread.cpp b/tests/swmr_map/swmr_map_singlethread.cpp index 30510577ec..9fdf1ada41 100644 --- a/tests/swmr_map/swmr_map_singlethread.cpp +++ b/tests/swmr_map/swmr_map_singlethread.cpp @@ -110,7 +110,7 @@ verify_elements(persistent_map_type &map, size_t elements) template void tx_alloc_wrapper(nvobj::pool_base &pop, nvobj::persistent_ptr &ptr, - Args &&...args) + Args &&... args) { try { nvobj::transaction::manual tx(pop); From 1a3c8c046f80cb8cc71e36c33f0d9b1e7aca953f Mon Sep 17 00:00:00 2001 From: Yang Jun 01 Date: Fri, 23 Jul 2021 10:39:44 +0800 Subject: [PATCH 10/34] Clear format --- .../experimental/atomic_pa_self_relative_ptr.hpp | 6 +++--- .../libpmemobj++/experimental/atomic_self_relative_ptr.hpp | 2 +- tests/swmr_map/swmr_map.cpp | 2 +- tests/swmr_map/swmr_map_insert_reopen.cpp | 2 +- tests/swmr_map/swmr_map_pmreorder_simple.cpp | 2 +- tests/swmr_map/swmr_map_singlethread.cpp | 2 +- tests/swmr_map/swmr_map_tx.cpp | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/include/libpmemobj++/experimental/atomic_pa_self_relative_ptr.hpp b/include/libpmemobj++/experimental/atomic_pa_self_relative_ptr.hpp index afc382a1b1..08233421b5 100644 --- a/include/libpmemobj++/experimental/atomic_pa_self_relative_ptr.hpp +++ b/include/libpmemobj++/experimental/atomic_pa_self_relative_ptr.hpp @@ -1,8 +1,8 @@ // SPDX-License-Identifier: BSD-3-Clause -/* Copyright 2020-2021, Intel Corporation */ +/* Copyright 2021, 4Paradigm Inc. 
*/ -#ifndef LIBPMEMOBJ_CPP_ATOMIC_SELF_RELATIVE_PTR_HPP -#define LIBPMEMOBJ_CPP_ATOMIC_SELF_RELATIVE_PTR_HPP +#ifndef LIBPMEMOBJ_CPP_ATOMIC_PA_SELF_RELATIVE_PTR_HPP +#define LIBPMEMOBJ_CPP_ATOMIC_PA_SELF_RELATIVE_PTR_HPP #include #include diff --git a/include/libpmemobj++/experimental/atomic_self_relative_ptr.hpp b/include/libpmemobj++/experimental/atomic_self_relative_ptr.hpp index 14533dcd24..d1488cdb2e 100644 --- a/include/libpmemobj++/experimental/atomic_self_relative_ptr.hpp +++ b/include/libpmemobj++/experimental/atomic_self_relative_ptr.hpp @@ -1,5 +1,5 @@ // SPDX-License-Identifier: BSD-3-Clause -/* Copyright 2020-2021, Intel Corporation */ +/* Copyright 2021, 4Paradigm Inc. */ #ifndef LIBPMEMOBJ_CPP_ATOMIC_SELF_RELATIVE_PTR_HPP #define LIBPMEMOBJ_CPP_ATOMIC_SELF_RELATIVE_PTR_HPP diff --git a/tests/swmr_map/swmr_map.cpp b/tests/swmr_map/swmr_map.cpp index ca0633fb26..d26568784a 100644 --- a/tests/swmr_map/swmr_map.cpp +++ b/tests/swmr_map/swmr_map.cpp @@ -1,5 +1,5 @@ // SPDX-License-Identifier: BSD-3-Clause -/* Copyright 2020, Intel Corporation */ +/* Copyright 2021, 4Paradigm Inc. */ /* * swmr_map.cpp -- pmem::obj::experimental::swmr_map test diff --git a/tests/swmr_map/swmr_map_insert_reopen.cpp b/tests/swmr_map/swmr_map_insert_reopen.cpp index 7dcb3ccc9c..25095f85f7 100644 --- a/tests/swmr_map/swmr_map_insert_reopen.cpp +++ b/tests/swmr_map/swmr_map_insert_reopen.cpp @@ -1,5 +1,5 @@ // SPDX-License-Identifier: BSD-3-Clause -/* Copyright 2020, Intel Corporation */ +/* Copyright 2021, 4Paradigm Inc. */ /* * swmr_map_insert_reopen.cpp -- pmem::obj::experimental::swmr_map diff --git a/tests/swmr_map/swmr_map_pmreorder_simple.cpp b/tests/swmr_map/swmr_map_pmreorder_simple.cpp index ce05438c5c..f33134102b 100644 --- a/tests/swmr_map/swmr_map_pmreorder_simple.cpp +++ b/tests/swmr_map/swmr_map_pmreorder_simple.cpp @@ -1,5 +1,5 @@ // SPDX-License-Identifier: BSD-3-Clause -/* Copyright 2020, Intel Corporation */ +/* Copyright 2021, 4Paradigm Inc. 
*/ /* * swmr_map_reorder_simple.cpp -- pmem::obj::experimental::swmr_map diff --git a/tests/swmr_map/swmr_map_singlethread.cpp b/tests/swmr_map/swmr_map_singlethread.cpp index 9fdf1ada41..bd94acc035 100644 --- a/tests/swmr_map/swmr_map_singlethread.cpp +++ b/tests/swmr_map/swmr_map_singlethread.cpp @@ -1,5 +1,5 @@ // SPDX-License-Identifier: BSD-3-Clause -/* Copyright 2020, Intel Corporation */ +/* Copyright 2021, 4Paradigm Inc. */ /* * swmr_map_singlethread.cpp -- pmem::obj::experimental::swmr_map diff --git a/tests/swmr_map/swmr_map_tx.cpp b/tests/swmr_map/swmr_map_tx.cpp index e618e3c208..f553016a94 100644 --- a/tests/swmr_map/swmr_map_tx.cpp +++ b/tests/swmr_map/swmr_map_tx.cpp @@ -1,5 +1,5 @@ // SPDX-License-Identifier: BSD-3-Clause -/* Copyright 2019-2020, Intel Corporation */ +/* Copyright 2021, 4Paradigm Inc. */ /* * concurrent_hash_map_tx.cpp -- pmem::obj::experimental::swmr_map test From 08ddbfe5b373abb06db62ebaf47d695f1390c8bd Mon Sep 17 00:00:00 2001 From: Yang Jun 01 Date: Fri, 23 Jul 2021 12:35:31 +0800 Subject: [PATCH 11/34] Fix copy constructor --- include/libpmemobj++/experimental/pa_self_relative_ptr.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp b/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp index af12dff3dc..53e6fbee74 100644 --- a/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp +++ b/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp @@ -159,7 +159,7 @@ class pa_self_relative_ptr : public self_relative_ptr_base { pa_self_relative_ptr(const pa_self_relative_ptr &ptr) noexcept : base_type(ptr) { - this->offset &= ptr.flush_set_mask(); + /* this->offset &= ptr.flush_set_mask(); */ } /** From 380c70fec52a2ca493efc3afa8abb96eae025b20 Mon Sep 17 00:00:00 2001 From: Yang Jun 01 Date: Fri, 23 Jul 2021 14:39:29 +0800 Subject: [PATCH 12/34] Add some comments --- .../experimental/pa_self_relative_ptr.hpp | 15 +++++---- 
.../experimental/swmr_skip_list_impl.hpp | 31 ++++++++----------- 2 files changed, 20 insertions(+), 26 deletions(-) diff --git a/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp b/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp index 53e6fbee74..4d5c8cc9a1 100644 --- a/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp +++ b/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp @@ -160,6 +160,8 @@ class pa_self_relative_ptr : public self_relative_ptr_base { : base_type(ptr) { /* this->offset &= ptr.flush_set_mask(); */ + /* the base_type calculates the offset based on the difference + * of the volatile ptr, so no need to reset the mask. */ } /** @@ -179,6 +181,8 @@ class pa_self_relative_ptr : public self_relative_ptr_base { : base_type(self_offset(static_cast(r.get()))) { this->offset &= r.flush_set_mask(); + /* the base_type set the offset using the volatile ptr, so + * the mask must be set manually */ } ~pa_self_relative_ptr() @@ -202,6 +206,9 @@ class pa_self_relative_ptr : public self_relative_ptr_base { auto second = other.to_byte_pointer(); this->offset = pointer_to_offset(second); this->offset &= other.flush_set_mask(); + /* pointer_to_offset after to_byte_pointer removes the + * flush_needed flag, so it needs to be set manually + * after the recalculation of offset */ other.offset = other.pointer_to_offset(first); other.offset &= mask; } @@ -307,14 +314,6 @@ class pa_self_relative_ptr : public self_relative_ptr_base { return (mask | kFlushNeeded); } - /** - * return offset for debug only - */ - offset_type - get_offset() const - { - return this->offset; - } /* * Operators */ diff --git a/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp b/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp index 2d4cee0de9..eff9a6ed26 100644 --- a/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp +++ b/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp @@ -115,6 +115,10 @@ class swmr_skip_list_node { return 
*get(); } + /* return the node_pointer of the next node on specific level. + * if it is marked flush_needed(), use CAS to clear the flag and + * perform the flush before return. + * */ node_pointer next(size_type level) { @@ -154,7 +158,7 @@ class swmr_skip_list_node { * Can`t be called concurrently * Should be called inside a transaction */ - /* + void set_next_tx(size_type level, node_pointer next) { @@ -165,25 +169,16 @@ class swmr_skip_list_node { node.store(next, std::memory_order_release); } - void - set_next(obj::pool_base pop, size_type level, node_pointer next) - { - assert(level < height()); - auto &node = get_next(level); - node.store(next, std::memory_order_release); - pop.persist(&node, sizeof(node)); - }*/ - void set_next(size_type level, node_pointer next) { assert(level < height()); - /* assert(pmemobj_tx_stage() == TX_STAGE_WORK); */ auto &node = get_next(level); - /* obj::flat_transaction::snapshot - * (&node); */ node.store(node_pointer{next.get(), true}, std::memory_order_release); + /* pop.persist(&node, sizeof(node)); */ + /* instead of persist it immediately, mark it as flush_needed, + * and rely on consequent get_next operation to flush. 
*/ } void @@ -197,7 +192,7 @@ class swmr_skip_list_node { std::memory_order_relaxed); } } - /* + void set_nexts(obj::pool_base pop, const node_pointer *new_nexts, size_type h) @@ -207,7 +202,7 @@ class swmr_skip_list_node { auto *nexts = get_nexts(); pop.persist(nexts, sizeof(nexts[0]) * h); } - */ + /** @return number of layers */ size_type height() const @@ -1908,7 +1903,7 @@ class swmr_skip_list { node_ptr head = dummy_head.get(); for (size_type i = 0; i < head->height(); ++i) { - head->set_next(i, nullptr); + head->set_next_tx(i, nullptr); } on_init_size = 0; @@ -2441,7 +2436,7 @@ class swmr_skip_list { -> persistent_node_ptr & { assert(tls_entry.insert_stage == not_started); assert(tls_entry.ptr != nullptr); - n->set_nexts(next_nodes.data(), height); + n->set_nexts(pop, next_nodes.data(), height); tls_entry.insert_stage = in_progress; pop.persist(&(tls_entry.insert_stage), @@ -2835,7 +2830,7 @@ class swmr_skip_list { ++level) { assert(prev_nodes[level]->height() > level); assert(next_nodes[level].get() == erase_node); - prev_nodes[level]->set_next(level, + prev_nodes[level]->set_next_tx(level, erase_node->next(level)); } From 8226e94f21a4492172565a4de453ec1ca2cc45f7 Mon Sep 17 00:00:00 2001 From: Yang Jun 01 Date: Fri, 23 Jul 2021 15:55:01 +0800 Subject: [PATCH 13/34] Fix and add some comments. --- .../experimental/pa_self_relative_ptr.hpp | 20 +++++++++++++------ .../experimental/swmr_skip_list_impl.hpp | 2 -- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp b/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp index 4d5c8cc9a1..5b0aab3163 100644 --- a/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp +++ b/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp @@ -9,12 +9,20 @@ #include #include -/* According to the definition of offset=real_offset-1, for 8-byte aligned - * allocation, the lower 3 bits of the stored offset are always 1 (except - * null_ptr). 
Therefore, the second lowest bit is used as the indicator of if - * the data pointed by the pa_self_relative_ptr - * (persistent-aware self_relative_ptr) needs explicit flush. - * Flush is needed if it is 0, not needed if it is 1. +/* To prevent concurrent read ops to see non-persistent data updated by a + * concurrent write op in PMem, expensive locks/txns are commonly used for + * write op, e.g., locks the data object during updates and flush, unlock it + * after flush is finished, or encapsulate the updates and flush in a PMDK txns. + * Persistent-aware self_relative_ptr aims to solve this problem without using + * locks or txns. + * + * Based on self_relative_ptr, the definition of the stored offset + * is real_offset-1, so for 8-byte aligned allocation, the lower 3 bits + * of the stored offset are always 1 (except null_ptr). Therefore, the second + * lowest bit can be used as the indicator of whether the + * pa_self_relative_ptr (persistent-aware self_relative_ptr) itself needs + * explicit flush. If a consequent (atomic) read sees the second lowest bit is + * 0, it uses CAS to reset the bit to 1 and explicitly performs the flush. * */ #define kFlushNeeded ~(1L << 1) diff --git a/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp b/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp index eff9a6ed26..b2ab866875 100644 --- a/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp +++ b/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp @@ -138,8 +138,6 @@ class swmr_skip_list_node { pop.persist(&node, sizeof(node)); break; } - current = - get_next(level).load(std::memory_order_acquire); if (!current.flush_needed()) { return current; } From 92d238dab0ceb2056f6ba5fa0ef7c8f2d1ce7aaa Mon Sep 17 00:00:00 2001 From: Yang Jun 01 Date: Mon, 23 Aug 2021 16:43:12 +0800 Subject: [PATCH 14/34] Implement persistent-aware feature of self_relative_ptr as a templated parameter. 
Implement the single-writer-multi-reader skip-list based on the persistent-aware self_relative_ptr in existing concurrent_skip_list_impl as a templated parameter. Add some UTs. --- .../detail/concurrent_skip_list_impl.hpp | 119 +- .../detail/self_relative_ptr_base_impl.hpp | 28 +- .../atomic_pa_self_relative_ptr.hpp | 306 -- .../experimental/atomic_self_relative_ptr.hpp | 365 +- .../experimental/concurrent_map.hpp | 4 +- .../experimental/pa_self_relative_ptr.hpp | 818 ----- .../experimental/self_relative_ptr.hpp | 207 +- .../libpmemobj++/experimental/swmr_map.hpp | 10 +- .../experimental/swmr_skip_list_impl.hpp | 3165 ----------------- tests/ptr/pa_self_relative_ptr.cpp | 5 +- tests/ptr/pa_self_relative_ptr_arith.cpp | 10 +- tests/ptr/pa_self_relative_ptr_atomic.hpp | 9 +- .../ptr/pa_self_relative_ptr_atomic_pmem.cpp | 7 +- tests/ptr/ptr.hpp | 35 +- tests/ptr/self_relative_ptr.cpp | 2 +- tests/ptr/self_relative_ptr_arith.cpp | 9 +- tests/ptr/self_relative_ptr_atomic.hpp | 15 +- tests/ptr/self_relative_ptr_atomic_pmem.cpp | 3 +- tests/swmr_map/swmr_map.cpp | 54 +- tests/swmr_map/swmr_map_insert_reopen.cpp | 4 +- 20 files changed, 717 insertions(+), 4458 deletions(-) delete mode 100644 include/libpmemobj++/experimental/atomic_pa_self_relative_ptr.hpp delete mode 100644 include/libpmemobj++/experimental/pa_self_relative_ptr.hpp delete mode 100644 include/libpmemobj++/experimental/swmr_skip_list_impl.hpp diff --git a/include/libpmemobj++/container/detail/concurrent_skip_list_impl.hpp b/include/libpmemobj++/container/detail/concurrent_skip_list_impl.hpp index d9671e9dc8..d72ef8fc59 100644 --- a/include/libpmemobj++/container/detail/concurrent_skip_list_impl.hpp +++ b/include/libpmemobj++/container/detail/concurrent_skip_list_impl.hpp @@ -115,7 +115,8 @@ allocator_swap(MyAlloc &, OtherAlloc &, std::false_type) { /* NO SWAP */ } -template > class skip_list_node { public: @@ -126,7 +127,8 @@ class skip_list_node { using pointer = value_type *; using const_pointer = const 
value_type *; using node_pointer = - obj::experimental::self_relative_ptr; + obj::experimental::self_relative_ptr; using atomic_node_pointer = std::atomic; using mutex_type = Mutex; using lock_type = LockType; @@ -205,6 +207,13 @@ class skip_list_node { return get_next(level).load(std::memory_order_acquire); } + node_pointer + next(size_type level) + { + assert(level < height()); + return get_next(level).persist_load(std::memory_order_acquire); + } + /** * Can`t be called concurrently * Should be called inside a transaction @@ -228,6 +237,18 @@ class skip_list_node { pop.persist(&node, sizeof(node)); } + void + set_next(size_type level, node_pointer next) + { + assert(level < height()); + auto &node = get_next(level); + node.store(node_pointer{next.get(), true}, + std::memory_order_release); + /* instead of persist it immediately, mark it dirty, + * and rely on consequent get_next operation to flush. + */ + } + void set_nexts(const node_pointer *new_nexts, size_type h) { @@ -235,7 +256,7 @@ class skip_list_node { auto *nexts = get_nexts(); for (size_type i = 0; i < h; i++) { - nexts[i].store(new_nexts[i], std::memory_order_relaxed); + nexts[i].store(node_pointer{new_nexts[i].get(), true}, std::memory_order_relaxed); } } @@ -326,12 +347,14 @@ class skip_list_iterator { { } - reference operator*() const + reference + operator*() const { return *(node->get()); } - pointer operator->() const + pointer + operator->() const { return node->get(); } @@ -492,8 +515,11 @@ class concurrent_skip_list { using const_reference = const value_type &; using pointer = typename allocator_traits_type::pointer; using const_pointer = typename allocator_traits_type::const_pointer; + using use_persistent_aware_ptr = + typename traits_type::use_persistent_aware_ptr; - using list_node_type = skip_list_node; + using list_node_type = + skip_list_node; using iterator = skip_list_iterator; using const_iterator = skip_list_iterator; @@ -509,7 +535,8 @@ class concurrent_skip_list { using node_ptr 
= list_node_type *; using const_node_ptr = const list_node_type *; using persistent_node_ptr = - obj::experimental::self_relative_ptr; + obj::experimental::self_relative_ptr; using prev_array_type = std::array; using next_array_type = std::array; @@ -1080,7 +1107,7 @@ class concurrent_skip_list { */ template std::pair - emplace(Args &&... args) + emplace(Args &&...args) { return internal_emplace(std::forward(args)...); } @@ -1117,7 +1144,7 @@ class concurrent_skip_list { */ template iterator - emplace_hint(const_iterator hint, Args &&... args) + emplace_hint(const_iterator hint, Args &&...args) { /* Ignore hint */ return emplace(std::forward(args)...).first; @@ -1148,7 +1175,7 @@ class concurrent_skip_list { */ template std::pair - try_emplace(const key_type &k, Args &&... args) + try_emplace(const key_type &k, Args &&...args) { return internal_try_emplace(k, std::forward(args)...); } @@ -1178,7 +1205,7 @@ class concurrent_skip_list { */ template std::pair - try_emplace(key_type &&k, Args &&... args) + try_emplace(key_type &&k, Args &&...args) { return internal_try_emplace(std::move(k), std::forward(args)...); @@ -1215,7 +1242,7 @@ class concurrent_skip_list { has_is_transparent::value && std::is_constructible::value, std::pair>::type - try_emplace(K &&k, Args &&... 
args) + try_emplace(K &&k, Args &&...args) { return internal_try_emplace(std::forward(k), std::forward(args)...); @@ -2301,6 +2328,27 @@ class concurrent_skip_list { sizeof(decltype(size_diff)) - sizeof(decltype(insert_stage))]; }; + + class swmr_tls_data : public obj::segment_vector { + public: + using base_type = obj::segment_vector; + swmr_tls_data() {} + ~swmr_tls_data() = default; + tls_entry_type& local() { + if (this->size() == 0) { + this->resize(1); + } + return this->front(); + } + + private: + obj::pool_base get_pool() const noexcept { + auto pop = pmemobj_pool_by_ptr(this); + assert(pop != nullptr); + return obj::pool_base(pop); + } + }; + static_assert(sizeof(tls_entry_type) == 64, "The size of tls_entry_type should be 64 bytes."); @@ -2425,14 +2473,12 @@ class concurrent_skip_list { assert(level < prev->height()); persistent_node_ptr next = prev->next(level); pointer_type curr = next.get(); - while (curr && cmp(get_key(curr), key)) { prev = curr; assert(level < prev->height()); next = prev->next(level); curr = next.get(); } - return next; } @@ -2491,7 +2537,7 @@ class concurrent_skip_list { template std::pair - internal_try_emplace(K &&key, Args &&... args) + internal_try_emplace(K &&key, Args &&...args) { return internal_insert( key, std::piecewise_construct, @@ -2501,7 +2547,7 @@ class concurrent_skip_list { template std::pair - internal_emplace(Args &&... args) + internal_emplace(Args &&...args) { check_outside_tx(); tls_entry_type &tls_entry = tls_data.local(); @@ -2555,7 +2601,7 @@ class concurrent_skip_list { */ template std::pair - internal_unsafe_emplace(Args &&... args) + internal_unsafe_emplace(Args &&...args) { check_tx_stage_work(); @@ -2592,7 +2638,7 @@ class concurrent_skip_list { */ template std::pair - internal_insert(const K &key, Args &&... 
args) + internal_insert(const K &key, Args &&...args) { check_outside_tx(); tls_entry_type &tls_entry = tls_data.local(); @@ -2640,7 +2686,6 @@ class concurrent_skip_list { do { find_insert_pos(prev_nodes, next_nodes, key); - node_ptr next = next_nodes[0].get(); if (next && !allow_multimapping && !_compare(key, get_key(next))) { @@ -2748,8 +2793,9 @@ class concurrent_skip_list { return true; } - bool - try_lock_nodes(size_type height, prev_array_type &prevs, + template + typename std::enable_if::value, bool>::type + try_lock_nodes_impl(size_type height, prev_array_type &prevs, const next_array_type &nexts, lock_array &locks) { assert(check_prev_array(prevs, height)); @@ -2770,6 +2816,21 @@ class concurrent_skip_list { return true; } + template + typename std::enable_if::value, bool>::type + try_lock_nodes_impl(size_type height, prev_array_type &prevs, + const next_array_type &nexts, lock_array &locks) + { + return true; + } + + bool + try_lock_nodes(size_type height, prev_array_type &prevs, + const next_array_type &nexts, lock_array &locks) + { + return try_lock_nodes_impl(height, prevs, nexts, locks); + } + /** * Returns an iterator pointing to the first element from the list for * which cmp(element, key) is false. @@ -2996,7 +3057,7 @@ class concurrent_skip_list { /** Creates new node */ template persistent_node_ptr - create_node(Args &&... args) + create_node(Args &&...args) { size_type levels = random_level(); @@ -3066,15 +3127,13 @@ class concurrent_skip_list { */ template persistent_node_ptr - creates_dummy_node(size_type height, Args &&... 
args) + creates_dummy_node(size_type height, Args &&...args) { assert(pmemobj_tx_stage() == TX_STAGE_WORK); size_type sz = calc_node_size(height); - persistent_node_ptr n = node_allocator_traits::allocate(_node_allocator, sz) .raw(); - assert(n != nullptr); node_allocator_traits::construct(_node_allocator, n.get(), @@ -3238,7 +3297,12 @@ class concurrent_skip_list { random_level_generator_type _rnd_generator; persistent_node_ptr dummy_head; - enumerable_thread_specific tls_data; +// using tls_data_storage = enumerable_thread_specific; + using tls_data_storage = typename std::conditional::value, + swmr_tls_data, + enumerable_thread_specific>::type; + + tls_data_storage tls_data; std::atomic _size; @@ -3252,7 +3316,7 @@ class concurrent_skip_list { template + size_t MAX_LEVEL, typename UsePersistentAwarePtr = std::false_type> class map_traits { public: static constexpr size_t max_level = MAX_LEVEL; @@ -3264,6 +3328,7 @@ class map_traits { using reference = value_type &; using const_reference = const value_type &; using allocator_type = Allocator; + using use_persistent_aware_ptr = UsePersistentAwarePtr; /** * pmem::detail::concurrent_skip_list allows multimapping. 
If this flag diff --git a/include/libpmemobj++/detail/self_relative_ptr_base_impl.hpp b/include/libpmemobj++/detail/self_relative_ptr_base_impl.hpp index a6f3e9c751..1ddbc549d3 100644 --- a/include/libpmemobj++/detail/self_relative_ptr_base_impl.hpp +++ b/include/libpmemobj++/detail/self_relative_ptr_base_impl.hpp @@ -44,7 +44,7 @@ class self_relative_ptr_base_impl { using byte_type = uint8_t; using byte_ptr_type = byte_type *; using const_byte_ptr_type = const byte_type *; - + static constexpr difference_type dirty_flag = ~(1L << 1); /* * Constructors */ @@ -159,7 +159,8 @@ class self_relative_ptr_base_impl { /** * Explicit conversion operator to void* */ - explicit operator void *() const noexcept + explicit + operator void *() const noexcept { return to_void_pointer(); } @@ -190,7 +191,24 @@ class self_relative_ptr_base_impl { { return offset == nullptr_offset; } - + /** + * check if offset is dirty + */ + static inline bool + is_dirty(difference_type other_offset) + { + return ((other_offset != nullptr_offset) && !((other_offset>>1) & 1)); + } + bool is_dirty() + { + return (!is_null() && !((difference_type(offset)>>1) & 1)); + } + void set_dirty_flag(bool dirty) + { + intptr_t dirty_mask = dirty == true; + --dirty_mask; + offset &= (dirty_mask | dirty_flag); + } protected: /** * Offset constructor. @@ -222,8 +240,10 @@ class self_relative_ptr_base_impl { */ uintptr_t mask = other_offset == nullptr_offset; --mask; + /* clear the dirty_flag if it's set to get the correct ptr. 
*/ uintptr_t ptr = static_cast( - reinterpret_cast(this) + other_offset + 1); + reinterpret_cast(this) + + (other_offset | ~dirty_flag) + 1); ptr &= mask; return reinterpret_cast(ptr); } diff --git a/include/libpmemobj++/experimental/atomic_pa_self_relative_ptr.hpp b/include/libpmemobj++/experimental/atomic_pa_self_relative_ptr.hpp deleted file mode 100644 index 08233421b5..0000000000 --- a/include/libpmemobj++/experimental/atomic_pa_self_relative_ptr.hpp +++ /dev/null @@ -1,306 +0,0 @@ -// SPDX-License-Identifier: BSD-3-Clause -/* Copyright 2021, 4Paradigm Inc. */ - -#ifndef LIBPMEMOBJ_CPP_ATOMIC_PA_SELF_RELATIVE_PTR_HPP -#define LIBPMEMOBJ_CPP_ATOMIC_PA_SELF_RELATIVE_PTR_HPP - -#include -#include -#include -#include - -#include - -namespace std -{ -/** - * Atomic specialization for pa_self_relative_ptr - * - * Doesn't automatically add itself to the transaction. - * The user is responsible for persisting the data. - */ -template -struct atomic> { -private: - using ptr_type = pmem::detail::self_relative_ptr_base_impl< - std::atomic>; - using accessor = pmem::detail::self_relative_accessor< - std::atomic>; - -public: - using this_type = atomic; - using value_type = pmem::obj::experimental::pa_self_relative_ptr; - using difference_type = typename value_type::difference_type; - - /* - * Constructors - */ - - constexpr atomic() noexcept = default; - atomic(value_type value) : ptr() - { - store(value); - } - atomic(const atomic &) = delete; - - void - store(value_type desired, - std::memory_order order = std::memory_order_seq_cst) noexcept - { - auto offset = accessor::pointer_to_offset(ptr, desired.get()); - LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_BEFORE(order, &ptr); - accessor::get_offset(ptr).store( - (offset & desired.flush_set_mask()), order); - } - - value_type - load(std::memory_order order = std::memory_order_seq_cst) const noexcept - { - auto offset = accessor::get_offset(ptr).load(order); - LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_AFTER(order, &ptr); - auto pointer = 
accessor::offset_to_pointer( - offset | ~(value_type::flush_set_mask(offset)), ptr); - return value_type{pointer, value_type::flush_needed(offset)}; - } - - value_type - exchange(value_type desired, - std::memory_order order = std::memory_order_seq_cst) noexcept - { - auto new_offset = - accessor::pointer_to_offset(ptr, desired.get()); - auto old_offset = accessor::get_offset(ptr).exchange( - new_offset & desired.flush_set_mask(), order); - return value_type{accessor::offset_to_pointer( - old_offset | - ~(value_type::flush_set_mask( - old_offset)), - ptr), - value_type::flush_needed(old_offset)}; - } - - bool - compare_exchange_weak(value_type &expected, value_type desired, - std::memory_order success, - std::memory_order failure) noexcept - { - auto expected_offset = - accessor::pointer_to_offset(ptr, expected.get()); - auto expected_actual = - expected_offset & expected.flush_set_mask(); - auto desired_offset = - accessor::pointer_to_offset(ptr, desired.get()); - auto desired_actual = desired_offset & desired.flush_set_mask(); - bool result = accessor::get_offset(ptr).compare_exchange_weak( - expected_actual, desired_actual, success, failure); - if (!result) { - expected = value_type{ - accessor::offset_to_pointer( - expected_actual | - ~(value_type::flush_set_mask( - expected_actual)), - ptr), - value_type::flush_needed(expected_actual)}; - } - return result; - } - - bool - compare_exchange_weak( - value_type &expected, value_type desired, - std::memory_order order = std::memory_order_seq_cst) noexcept - { - auto expected_offset = - accessor::pointer_to_offset(ptr, expected.get()); - auto expected_actual = - expected_offset & expected.flush_set_mask(); - auto desired_offset = - accessor::pointer_to_offset(ptr, desired.get()); - auto desired_actual = desired_offset & desired.flush_set_mask(); - bool result = accessor::get_offset(ptr).compare_exchange_weak( - expected_actual, desired_actual, order); - if (!result) { - expected = value_type{ - 
accessor::offset_to_pointer( - expected_actual | - ~(value_type::flush_set_mask( - expected_actual)), - ptr), - value_type::flush_needed(expected_actual)}; - } - return result; - } - - bool - compare_exchange_strong(value_type &expected, value_type desired, - std::memory_order success, - std::memory_order failure) noexcept - { - auto expected_offset = - accessor::pointer_to_offset(ptr, expected.get()); - auto expected_actual = - expected_offset & expected.flush_set_mask(); - auto desired_offset = - accessor::pointer_to_offset(ptr, desired.get()); - auto desired_actual = desired_offset & desired.flush_set_mask(); - bool result = accessor::get_offset(ptr).compare_exchange_strong( - expected_actual, desired_actual, success, failure); - if (!result) { - expected = value_type{ - accessor::offset_to_pointer( - expected_actual | - ~(value_type::flush_set_mask( - expected_actual)), - ptr), - value_type::flush_needed(expected_actual)}; - } - return result; - } - - bool - compare_exchange_strong( - value_type &expected, value_type desired, - std::memory_order order = std::memory_order_seq_cst) noexcept - { - auto expected_offset = - accessor::pointer_to_offset(ptr, expected.get()); - auto expected_actual = - expected_offset & expected.flush_set_mask(); - auto desired_offset = - accessor::pointer_to_offset(ptr, desired.get()); - auto desired_actual = desired_offset & desired.flush_set_mask(); - bool result = accessor::get_offset(ptr).compare_exchange_strong( - expected_actual, desired_actual, order); - if (!result) { - expected = value_type{ - accessor::offset_to_pointer( - expected_actual | - ~(value_type::flush_set_mask( - expected_actual)), - ptr), - value_type::flush_needed(expected_actual)}; - } - return result; - } - - value_type - fetch_add(difference_type val, - std::memory_order order = std::memory_order_seq_cst) noexcept - { - auto offset = accessor::get_offset(ptr).fetch_add( - val * static_cast(sizeof(T)), order); - return value_type{ - 
accessor::offset_to_pointer( - offset | ~(value_type::flush_set_mask(offset)), - ptr), - value_type::flush_needed(offset)}; - } - - value_type - fetch_sub(difference_type val, - std::memory_order order = std::memory_order_seq_cst) noexcept - { - auto offset = accessor::get_offset(ptr).fetch_sub( - val * static_cast(sizeof(T)), order); - return value_type{ - accessor::offset_to_pointer( - offset | ~(value_type::flush_set_mask(offset)), - ptr), - value_type::flush_needed(offset)}; - } - - bool - is_lock_free() const noexcept - { - return accessor::get_offset(ptr).is_lock_free(); - } - - /* - * Operators - */ - - operator value_type() const noexcept - { - return load(); - } - - atomic &operator=(const atomic &) = delete; - atomic &operator=(const atomic &) volatile = delete; - - value_type - operator=(value_type desired) noexcept - { - store(desired); - return desired; - } - - value_type - operator++() noexcept - { - return this->fetch_add(1) + 1; - } - - value_type - operator++(int) noexcept - { - return this->fetch_add(1); - } - - value_type - operator--() noexcept - { - return this->fetch_sub(1) - 1; - } - - value_type - operator--(int) noexcept - { - return this->fetch_sub(1); - } - - value_type - operator+=(difference_type diff) noexcept - { - return this->fetch_add(diff) + diff; - } - - value_type - operator-=(difference_type diff) noexcept - { - return this->fetch_sub(diff) - diff; - } - -private: - ptr_type ptr; -}; - -} /* namespace std */ - -namespace pmem -{ - -namespace detail -{ - -/** - * can_do_snapshot atomic specialization for pa_self_relative_ptr. Not thread - * safe. - * - * Use in a single threaded environment only. 
- */ -template -struct can_do_snapshot< - std::atomic>> { - using snapshot_type = obj::experimental::pa_self_relative_ptr; - static constexpr bool value = sizeof(std::atomic) == - sizeof(typename snapshot_type::offset_type); - static_assert( - value, - "std::atomic should be the same size"); -}; - -} /* namespace detail */ - -} /* namespace pmem */ - -#endif diff --git a/include/libpmemobj++/experimental/atomic_self_relative_ptr.hpp b/include/libpmemobj++/experimental/atomic_self_relative_ptr.hpp index d1488cdb2e..908617d032 100644 --- a/include/libpmemobj++/experimental/atomic_self_relative_ptr.hpp +++ b/include/libpmemobj++/experimental/atomic_self_relative_ptr.hpp @@ -19,8 +19,12 @@ namespace std * Doesn't automatically add itself to the transaction. * The user is responsible for persisting the data. */ + +template +struct atomic>; + template -struct atomic> { +struct atomic> { private: using ptr_type = pmem::detail::self_relative_ptr_base_impl< std::atomic>; @@ -29,7 +33,8 @@ struct atomic> { public: using this_type = atomic; - using value_type = pmem::obj::experimental::self_relative_ptr; + using value_type = + pmem::obj::experimental::self_relative_ptr; using difference_type = typename value_type::difference_type; /* @@ -82,7 +87,6 @@ struct atomic> { accessor::pointer_to_offset(ptr, expected.get()); auto desired_offset = accessor::pointer_to_offset(ptr, desired.get()); - bool result = accessor::get_offset(ptr).compare_exchange_weak( expected_offset, desired_offset, success, failure); if (!result) { @@ -105,7 +109,6 @@ struct atomic> { accessor::pointer_to_offset(ptr, expected.get()); auto desired_offset = accessor::pointer_to_offset(ptr, desired.get()); - bool result = accessor::get_offset(ptr).compare_exchange_weak( expected_offset, desired_offset, order); if (!result) { @@ -128,7 +131,6 @@ struct atomic> { accessor::pointer_to_offset(ptr, expected.get()); auto desired_offset = accessor::pointer_to_offset(ptr, desired.get()); - bool result = 
accessor::get_offset(ptr).compare_exchange_strong( expected_offset, desired_offset, success, failure); if (!result) { @@ -151,7 +153,6 @@ struct atomic> { accessor::pointer_to_offset(ptr, expected.get()); auto desired_offset = accessor::pointer_to_offset(ptr, desired.get()); - bool result = accessor::get_offset(ptr).compare_exchange_strong( expected_offset, desired_offset, order); if (!result) { @@ -268,6 +269,350 @@ struct atomic> { } } + pmem::obj::pool_base + get_pool_base() const + { + PMEMobjpool *pop = pmemobj_pool_by_ptr(this); + return pmem::obj::pool_base(pop); + } + +private: + ptr_type ptr; +}; + +template +struct atomic> { +private: + using ptr_type = pmem::detail::self_relative_ptr_base_impl< + std::atomic>; + using accessor = pmem::detail::self_relative_accessor< + std::atomic>; + +public: + using this_type = atomic; + using value_type = pmem::obj::experimental::self_relative_ptr; + using difference_type = typename value_type::difference_type; + + /* + * Constructors + */ + + constexpr atomic() noexcept = default; + atomic(value_type value) : ptr() + { + store(value); + } + atomic(const atomic &) = delete; + + void + store(value_type desired, + std::memory_order order = std::memory_order_seq_cst) noexcept + { + auto offset = accessor::pointer_to_offset(ptr, desired.get()); + LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_BEFORE(order, &ptr); + intptr_t dirty_mask = desired.is_dirty(); + --dirty_mask; + offset &= (dirty_mask | ptr_type::dirty_flag); + accessor::get_offset(ptr).store(offset, order); + } + + value_type + load(std::memory_order order = std::memory_order_seq_cst) const noexcept + { + auto offset = accessor::get_offset(ptr).load(order); + LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_AFTER(order, &ptr); + if (ptr_type::is_dirty(offset)) { + auto &target = accessor::get_offset(ptr); + get_pool_base().persist(&target, sizeof(target)); + } + auto pointer = accessor::offset_to_pointer(offset, ptr); + return value_type{pointer, ptr_type::is_dirty(offset)}; + } + + 
value_type + persist_load( + std::memory_order order = std::memory_order_seq_cst) noexcept + { + auto offset = accessor::get_offset(ptr).load(order); + LIBPMEMOBJ_CPP_ANNOTATE_HAPPENS_AFTER(order, &ptr); + intptr_t dirty_mask; + if ((dirty_mask = (ptr_type::is_dirty(offset) == true)) > 0) { + auto &target = accessor::get_offset(ptr); + get_pool_base().persist(&target, sizeof(target)); + } + auto desired = offset; + while (true) { + --dirty_mask; + desired = offset | ~(ptr_type::dirty_flag | dirty_mask); + if (accessor::get_offset(ptr).compare_exchange_weak( + offset, desired) || + !ptr_type::is_dirty(offset)) { + break; + } + auto &target = accessor::get_offset(ptr); + get_pool_base().persist(&target, sizeof(target)); + dirty_mask = ptr_type::is_dirty(offset) == true; + } + auto pointer = accessor::offset_to_pointer(offset, ptr); + return value_type{pointer, ptr_type::is_dirty(offset)}; + } + value_type + exchange(value_type desired, + std::memory_order order = std::memory_order_seq_cst) noexcept + { + auto new_offset = + accessor::pointer_to_offset(ptr, desired.get()); + intptr_t dirty_mask = desired.is_dirty(); + --dirty_mask; + new_offset &= (dirty_mask | ptr_type::dirty_flag); + auto old_offset = + accessor::get_offset(ptr).exchange(new_offset, order); + return value_type{ + accessor::offset_to_pointer(old_offset, ptr), + ptr_type::is_dirty(old_offset)}; + } + + bool + compare_exchange_weak(value_type &expected, value_type desired, + std::memory_order success, + std::memory_order failure) noexcept + { + auto expected_offset = + accessor::pointer_to_offset(ptr, expected.get()); + auto desired_offset = + accessor::pointer_to_offset(ptr, desired.get()); + intptr_t dirty_mask = expected.is_dirty(); + --dirty_mask; + expected_offset &= (dirty_mask | ptr_type::dirty_flag); + dirty_mask = desired.is_dirty(); + --dirty_mask; + desired_offset &= (dirty_mask | ptr_type::dirty_flag); + bool result = accessor::get_offset(ptr).compare_exchange_weak( + expected_offset, 
desired_offset, success, failure); + if (!result) { + try { + expected = accessor::offset_to_pointer( + expected_offset, ptr); + expected.set_dirty_flag( + ptr_type::is_dirty(expected_offset)); + } catch (...) { + std::terminate(); + } + } + return result; + } + + bool + compare_exchange_weak( + value_type &expected, value_type desired, + std::memory_order order = std::memory_order_seq_cst) noexcept + { + auto expected_offset = + accessor::pointer_to_offset(ptr, expected.get()); + auto desired_offset = + accessor::pointer_to_offset(ptr, desired.get()); + intptr_t dirty_mask = expected.is_dirty(); + --dirty_mask; + expected_offset &= (dirty_mask | ptr_type::dirty_flag); + dirty_mask = desired.is_dirty(); + --dirty_mask; + desired_offset &= (dirty_mask | ptr_type::dirty_flag); + bool result = accessor::get_offset(ptr).compare_exchange_weak( + expected_offset, desired_offset, order); + if (!result) { + try { + expected = accessor::offset_to_pointer( + expected_offset, ptr); + expected.set_dirty_flag( + ptr_type::is_dirty(expected_offset)); + } catch (...) { + std::terminate(); + } + } + return result; + } + + bool + compare_exchange_strong(value_type &expected, value_type desired, + std::memory_order success, + std::memory_order failure) noexcept + { + auto expected_offset = + accessor::pointer_to_offset(ptr, expected.get()); + auto desired_offset = + accessor::pointer_to_offset(ptr, desired.get()); + intptr_t dirty_mask = expected.is_dirty(); + --dirty_mask; + expected_offset &= (dirty_mask | ptr_type::dirty_flag); + dirty_mask = desired.is_dirty(); + --dirty_mask; + desired_offset &= (dirty_mask | ptr_type::dirty_flag); + bool result = accessor::get_offset(ptr).compare_exchange_strong( + expected_offset, desired_offset, success, failure); + if (!result) { + try { + expected = accessor::offset_to_pointer( + expected_offset, ptr); + expected.set_dirty_flag( + ptr_type::is_dirty(expected_offset)); + } catch (...) 
{ + std::terminate(); + } + } + return result; + } + + bool + compare_exchange_strong( + value_type &expected, value_type desired, + std::memory_order order = std::memory_order_seq_cst) noexcept + { + auto expected_offset = + accessor::pointer_to_offset(ptr, expected.get()); + auto desired_offset = + accessor::pointer_to_offset(ptr, desired.get()); + intptr_t dirty_mask = expected.is_dirty(); + --dirty_mask; + expected_offset &= (dirty_mask | ptr_type::dirty_flag); + dirty_mask = desired.is_dirty(); + --dirty_mask; + desired_offset &= (dirty_mask | ptr_type::dirty_flag); + bool result = accessor::get_offset(ptr).compare_exchange_strong( + expected_offset, desired_offset, order); + if (!result) { + try { + expected = accessor::offset_to_pointer( + expected_offset, ptr); + expected.set_dirty_flag( + ptr_type::is_dirty(expected_offset)); + } catch (...) { + std::terminate(); + } + } + return result; + } + + value_type + fetch_add(difference_type val, + std::memory_order order = std::memory_order_seq_cst) noexcept + { + auto offset = accessor::get_offset(ptr).fetch_add( + val * static_cast(sizeof(T)), order); + return value_type{accessor::offset_to_pointer(offset, ptr), + true}; + } + + value_type + fetch_sub(difference_type val, + std::memory_order order = std::memory_order_seq_cst) noexcept + { + auto offset = accessor::get_offset(ptr).fetch_sub( + val * static_cast(sizeof(T)), order); + return value_type{accessor::offset_to_pointer(offset, ptr), + true}; + } + + bool + is_lock_free() const noexcept + { + return accessor::get_offset(ptr).is_lock_free(); + } + + /* + * Operators + */ + + operator value_type() const noexcept + { + return load(); + } + + atomic &operator=(const atomic &) = delete; + atomic &operator=(const atomic &) volatile = delete; + + value_type + operator=(value_type desired) noexcept + { + store(desired); + return desired; + } + + value_type + operator&=(difference_type flag) noexcept + { + auto old_offset = accessor::get_offset(ptr).load(); + 
auto new_offset = (old_offset & flag); + accessor::get_offset(ptr).exchange(new_offset); + return load(); + } + + value_type + operator++() noexcept + { + try { + return this->fetch_add(1) + 1; + } catch (...) { + /* This should never happen during normal program + * execution */ + std::terminate(); + } + } + + value_type + operator++(int) noexcept + { + return this->fetch_add(1); + } + + value_type + operator--() noexcept + { + try { + return this->fetch_sub(1) - 1; + } catch (...) { + /* This should never happen during normal program + * execution */ + std::terminate(); + } + } + + value_type + operator--(int) noexcept + { + return this->fetch_sub(1); + } + + value_type + operator+=(difference_type diff) noexcept + { + try { + return this->fetch_add(diff) + diff; + } catch (...) { + /* This should never happen during normal program + * execution */ + std::terminate(); + } + } + + value_type + operator-=(difference_type diff) noexcept + { + try { + return this->fetch_sub(diff) - diff; + } catch (...) { + /* This should never happen during normal program + * execution */ + std::terminate(); + } + } + + pmem::obj::pool_base + get_pool_base() const + { + PMEMobjpool *pop = pmemobj_pool_by_ptr(this); + return pmem::obj::pool_base(pop); + } + private: ptr_type ptr; }; @@ -285,9 +630,11 @@ namespace detail * * Use in a single threaded environment only. 
*/ -template -struct can_do_snapshot>> { - using snapshot_type = obj::experimental::self_relative_ptr; +template +struct can_do_snapshot< + std::atomic>> { + using snapshot_type = + obj::experimental::self_relative_ptr; static constexpr bool value = sizeof(std::atomic) == sizeof(typename snapshot_type::offset_type); static_assert(value, diff --git a/include/libpmemobj++/experimental/concurrent_map.hpp b/include/libpmemobj++/experimental/concurrent_map.hpp index e95c787e70..c738f5db96 100644 --- a/include/libpmemobj++/experimental/concurrent_map.hpp +++ b/include/libpmemobj++/experimental/concurrent_map.hpp @@ -46,10 +46,10 @@ template , class concurrent_map : public detail::concurrent_skip_list> { + false, 64, std::false_type>> { using traits_type = detail::map_traits; + Allocator, false, 64, std::false_type>; using base_type = pmem::detail::concurrent_skip_list; public: diff --git a/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp b/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp deleted file mode 100644 index 5b0aab3163..0000000000 --- a/include/libpmemobj++/experimental/pa_self_relative_ptr.hpp +++ /dev/null @@ -1,818 +0,0 @@ -// SPDX-License-Identifier: BSD-3-Clause -/* Copyright 2021, 4Paradigm Inc. */ - -#ifndef LIBPMEMOBJ_CPP_PA_SELF_RELATIVE_PTR_HPP -#define LIBPMEMOBJ_CPP_PA_SELF_RELATIVE_PTR_HPP - -#include -#include -#include -#include - -/* To prevent concurrent read ops to see non-persistent data updated by a - * concurrent write op in PMem, expensive locks/txns are commonly used for - * write op, e.g., locks the data object during updates and flush, unlock it - * after flush is finished, or encapsulate the updates and flush in a PMDK txns. - * Persistent-aware self_relative_ptr aims to solve this problem without using - * locks or txns. 
- * - * Based on self_relative_ptr, the definition of the stored offset - * is real_offset-1, so for 8-byte aligned allocation, the lower 3 bits - * of the stored offset are always 1 (except null_ptr). Therefore, the second - * lowest bit can be used as the indicator of whether the - * pa_self_relative_ptr (persistent-aware self_relative_ptr) itself needs - * explicit flush. If a consequent (atomic) read sees the second lowest bit is - * 0, it uses CAS to reset the bit to 1 and explicitly performs the flush. - * */ - -#define kFlushNeeded ~(1L << 1) -// flag &= kFlushNeeded, to indicate it needs flush -#define FlushNeeded(offset) (!((offset >> 1) & 1)) -// return true if needs explicit flush, false otherwise. - -namespace pmem -{ -namespace obj -{ -namespace experimental -{ - -template -class pa_self_relative_ptr; - -template <> -class pa_self_relative_ptr : public self_relative_ptr_base { -public: - using base_type = self_relative_ptr_base; - using this_type = pa_self_relative_ptr; - using element_type = void; - - constexpr pa_self_relative_ptr() noexcept = default; - - constexpr pa_self_relative_ptr(std::nullptr_t) noexcept - : self_relative_ptr_base() - { - } - - pa_self_relative_ptr(element_type *ptr) noexcept - : self_relative_ptr_base(self_offset(ptr)) - { - } - - inline element_type * - get() const noexcept - { - return static_cast(this->to_void_pointer()); - } - -private: - difference_type - self_offset(element_type *ptr) const noexcept - { - return base_type::pointer_to_offset(static_cast(ptr)); - } -}; - -template -class pa_self_relative_ptr : public self_relative_ptr_base { -public: - using base_type = self_relative_ptr_base; - using this_type = pa_self_relative_ptr; - using element_type = typename pmem::detail::sp_element::type; - - /** - * Random access iterator requirements (members) - */ - - /** - * The self_relative_ptr iterator category. 
- */ - using iterator_category = std::random_access_iterator_tag; - - /** - * The self_relative_ptr difference type. - */ - using difference_type = typename base_type::difference_type; - - /** - * The type of the value pointed to by the self_relative_ptr. - */ - using value_type = T; - - /** - * The reference type of the value pointed to by the self_relative_ptr. - */ - using reference = T &; - - /* - * Constructors - */ - /** - * Default constructor, equal the nullptr - */ - constexpr pa_self_relative_ptr() noexcept = default; - - /** - * Nullptr constructor - */ - constexpr pa_self_relative_ptr(std::nullptr_t) noexcept - : self_relative_ptr_base() - { - } - /** - * Volatile pointer constructor. - * - * @param ptr volatile pointer, pointing to persistent memory. - */ - pa_self_relative_ptr(element_type *ptr, - bool flushNeeded = false) noexcept - : base_type(self_offset(ptr)) - { - intptr_t mask = (flushNeeded == true); - --mask; - this->offset &= (mask | kFlushNeeded); - } - - /** - * Constructor from persistent_ptr - */ - pa_self_relative_ptr(persistent_ptr ptr, - bool flushNeeded = false) noexcept - : base_type(self_offset(ptr.get())) - { - intptr_t mask = (flushNeeded == true); - --mask; - this->offset &= (mask | kFlushNeeded); - } - - /** - * PMEMoid constructor. - * - * Provided for easy interoperability between C++ and C API's. - * - * @param oid C-style persistent pointer - */ - pa_self_relative_ptr(PMEMoid oid, bool flushNeeded = false) noexcept - : base_type(self_offset( - static_cast(pmemobj_direct(oid)))) - { - intptr_t mask = (flushNeeded == true); - --mask; - this->offset &= (mask | kFlushNeeded); - } - - /** - * Copy constructor - */ - pa_self_relative_ptr(const pa_self_relative_ptr &ptr) noexcept - : base_type(ptr) - { - /* this->offset &= ptr.flush_set_mask(); */ - /* the base_type calculates the offset based on the difference - * of the volatile ptr, so no need to reset the mask. 
*/ - } - - /** - * Copy constructor from a different pa_self_relative_ptr<>. - * - * Available only for convertible, non-void types. - */ - template < - typename U, - typename = typename std::enable_if< - !std::is_same< - typename std::remove_cv::type, - typename std::remove_cv::type>::value && - !std::is_void::value, - decltype(static_cast(std::declval()))>::type> - pa_self_relative_ptr(pa_self_relative_ptr const &r) noexcept - : base_type(self_offset(static_cast(r.get()))) - { - this->offset &= r.flush_set_mask(); - /* the base_type set the offset using the volatile ptr, so - * the mask must be set manually */ - } - - ~pa_self_relative_ptr() - { - verify_type(); - } - /** - * Swaps two self_relative_ptr_base objects of the same type. - * - * @param[in,out] other the other self_relative_ptr to swap. - */ - void - swap(pa_self_relative_ptr &other) - { - if (this == &other) - return; - detail::conditional_add_to_tx(this); - detail::conditional_add_to_tx(&other); - auto first = this->to_byte_pointer(); - auto mask = this->flush_set_mask(); - auto second = other.to_byte_pointer(); - this->offset = pointer_to_offset(second); - this->offset &= other.flush_set_mask(); - /* pointer_to_offset after to_byte_pointer removes the - * flush_needed flag, so it needs to be set manually - * after the recalculation of offset */ - other.offset = other.pointer_to_offset(first); - other.offset &= mask; - } - - /** - * Conversion to byte pointer - */ - byte_ptr_type - to_byte_pointer() const noexcept - { - return static_cast(this->to_void_pointer()); - } - - /** - * Conversion to void* - */ - void * - to_void_pointer() const noexcept - { - return this->offset_to_pointer(this->offset); - } - - /** - * Explicit conversion operator to void* - */ - explicit operator void *() const noexcept - { - return to_void_pointer(); - } - - /** - * Explicit conversion operator to byte pointer - */ - explicit operator byte_ptr_type() const noexcept - { - return to_byte_pointer(); - } - - /** - * Byte 
distance between two relative pointers - */ - static difference_type - distance_between(const pa_self_relative_ptr &first, - const pa_self_relative_ptr &second) - { - return second.to_byte_pointer() - first.to_byte_pointer(); - } - - /** - * Get the direct pointer. - * - * @return the direct pointer to the object. - */ - inline element_type * - get() const noexcept - { - return static_cast(this->to_void_pointer()); - } - - /** - * Conversion to persitent ptr - */ - persistent_ptr - to_persistent_ptr() const - { - return persistent_ptr{this->get()}; - } - - /** - * Check if flush is needed - */ - inline bool - flush_needed() const - { - return (!is_null() && FlushNeeded(this->offset)); - } - - static inline bool - flush_needed(offset_type offset) - { - return ((offset != nullptr_offset) && FlushNeeded(offset)); - } - - /** - * return mask for caller to & in order to set the flush_needed flag. - * can also be used to clear the flag using offset |= ~flush_set_mask(). - */ - inline intptr_t - flush_set_mask() const - { - intptr_t mask = flush_needed(); - --mask; - return (mask | kFlushNeeded); - } - /** - * static version of flush_set_mask() for the given offset. - */ - static inline intptr_t - flush_set_mask(offset_type offset) - { - intptr_t mask = this_type::flush_needed(offset); - --mask; - return (mask | kFlushNeeded); - } - - /* - * Operators - */ - - /** - * Bool conversion operator. - */ - explicit operator bool() const noexcept - { - return !is_null(); - } - - /** - * Conversion operator to persistent_ptr - */ - operator persistent_ptr() const - { - return to_persistent_ptr(); - } - - /** - * Dereference operator. - */ - typename pmem::detail::sp_dereference::type operator*() const - noexcept - { - return *(this->get()); - } - - /** - * Member access operator. - */ - typename pmem::detail::sp_member_access::type operator->() const - noexcept - { - return this->get(); - } - - /** - * Array access operator. 
- * - * Contains run-time bounds checking for static arrays. - */ - template ::value>> - typename pmem::detail::sp_array_access::type - operator[](difference_type i) const noexcept - { - assert(i >= 0 && - (i < pmem::detail::sp_extent::value || - pmem::detail::sp_extent::value == 0) && - "persistent array index out of bounds"); - - return this->get()[i]; - } - - /** - * Assignment operator. - * - * self-relative pointer assignment within a transaction - * automatically registers this operation so that a rollback - * is possible. - * - * @throw pmem::transaction_error when adding the object to the - * transaction failed. - */ - pa_self_relative_ptr & - operator=(const pa_self_relative_ptr &r) - { - this->base_type::operator=(r); - return *this; - } - - /** - * Converting assignment operator from a different - * pa_self_relative_ptr<>. - * - * Available only for convertible types. - * Just like regular assignment, also automatically registers - * itself in a transaction. - * - * @throw pmem::transaction_error when adding the object - * to the transaction failed. - */ - template ::value>::type> - pa_self_relative_ptr & - operator=(pa_self_relative_ptr const &r) - { - this_type(r).swap(*this); - return *this; - } - - /** - * Nullptr move assignment operator. - * - * @throw pmem::transaction_error when adding the object to the - * transaction failed. - */ - pa_self_relative_ptr &operator=(std::nullptr_t) - { - detail::conditional_add_to_tx(this); - this->offset = self_offset(nullptr); - return *this; - } - - /** - * Prefix increment operator. - */ - inline pa_self_relative_ptr & - operator++() - { - detail::conditional_add_to_tx(this); - intptr_t mask = (this->flush_needed() == true); - --mask; - this->offset = (mask | kFlushNeeded) & - ((this->offset | ~kFlushNeeded) + - static_cast(sizeof(T))); - return *this; - } - - /** - * Postfix increment operator. 
- */ - inline pa_self_relative_ptr - operator++(int) - { - auto copy = *this; - ++(*this); - - return copy; - } - - /** - * Prefix decrement operator. - */ - inline pa_self_relative_ptr & - operator--() - { - detail::conditional_add_to_tx(this); - intptr_t mask = (this->flush_needed() == true); - --mask; - this->offset = (mask | kFlushNeeded) & - ((this->offset | ~kFlushNeeded) - - static_cast(sizeof(T))); - return *this; - } - - /** - * Postfix decrement operator. - */ - inline pa_self_relative_ptr - operator--(int) - { - auto copy = *this; - --(*this); - - return copy; - } - - /** - * Addition assignment operator. - */ - inline pa_self_relative_ptr & - operator+=(std::ptrdiff_t s) - { - detail::conditional_add_to_tx(this); - intptr_t mask = (this->flush_needed() == true); - --mask; - this->offset = (mask | kFlushNeeded) & - ((this->offset | ~kFlushNeeded) + - s * static_cast(sizeof(T))); - return *this; - } - - /** - * Subtraction assignment operator. - */ - inline pa_self_relative_ptr & - operator-=(std::ptrdiff_t s) - { - detail::conditional_add_to_tx(this); - intptr_t mask = (this->flush_needed() == true); - --mask; - this->offset = (mask | kFlushNeeded) & - ((this->offset | ~kFlushNeeded) - - s * static_cast(sizeof(T))); - return *this; - } - -protected: - /** - * Verify if element_type is not polymorphic - */ - void - verify_type() - { - static_assert(!std::is_polymorphic::value, - "Polymorphic types are not supported"); - } - /** - * Conversion to void* use other offset - */ - void * - offset_to_pointer(difference_type other_offset) const noexcept - { - intptr_t mask = other_offset == nullptr_offset; - --mask; - return base_type::offset_to_pointer( - (other_offset | ~kFlushNeeded) & mask); - } - -private: - static constexpr difference_type nullptr_offset = 0; - difference_type - self_offset(element_type *ptr) const noexcept - { - return base_type::pointer_to_offset(static_cast(ptr)); - } - template - friend class pa_self_relative_accessor; -}; - -/** - * 
Swaps two pa_self_relative_ptr objects of the same type. - * - * Non-member swap function as required by Swappable concept. - * en.cppreference.com/w/cpp/concept/Swappable - */ -template -inline void -swap(pa_self_relative_ptr &a, pa_self_relative_ptr &b) -{ - a.swap(b); -} - -/** - * Equality operator. - */ -template -inline bool -operator==(pa_self_relative_ptr const &lhs, - pa_self_relative_ptr const &rhs) noexcept -{ - return lhs.to_byte_pointer() == rhs.to_byte_pointer(); -} - -/** - * Inequality operator. - */ -template -inline bool -operator!=(pa_self_relative_ptr const &lhs, - pa_self_relative_ptr const &rhs) noexcept -{ - return !(lhs == rhs); -} - -/** - * Equality operator with nullptr. - */ -template -inline bool -operator==(pa_self_relative_ptr const &lhs, std::nullptr_t) noexcept -{ - return !bool(lhs); -} - -/** - * Equality operator with nullptr. - */ -template -inline bool -operator==(std::nullptr_t, pa_self_relative_ptr const &lhs) noexcept -{ - return !bool(lhs); -} - -/** - * Inequality operator with nullptr. - */ -template -inline bool -operator!=(pa_self_relative_ptr const &lhs, std::nullptr_t) noexcept -{ - return bool(lhs); -} - -/** - * Inequality operator with nullptr. - */ -template -inline bool -operator!=(std::nullptr_t, pa_self_relative_ptr const &lhs) noexcept -{ - return bool(lhs); -} - -/** - * Less than operator. - * - * @return true if the sum(this, offset) of lhs is less than the sum(this, - * offset) of rhs. Returns false otherwise. - */ -template -inline bool -operator<(pa_self_relative_ptr const &lhs, - pa_self_relative_ptr const &rhs) noexcept -{ - return lhs.to_byte_pointer() < rhs.to_byte_pointer(); -} - -/** - * Less or equal than operator. - * - * See less than operator for comparison rules. - */ -template -inline bool -operator<=(pa_self_relative_ptr const &lhs, - pa_self_relative_ptr const &rhs) noexcept -{ - return !(rhs < lhs); -} - -/** - * Greater than operator. - * - * See less than operator for comparison rules. 
- */ -template -inline bool -operator>(pa_self_relative_ptr const &lhs, - pa_self_relative_ptr const &rhs) noexcept -{ - return (rhs < lhs); -} - -/** - * Greater or equal than operator. - * - * See less than operator for comparison rules. - */ -template -inline bool -operator>=(pa_self_relative_ptr const &lhs, - pa_self_relative_ptr const &rhs) noexcept -{ - return !(lhs < rhs); -} - -/* nullptr comparisons */ - -/** - * Compare a pa_self_relative_ptr with a null pointer. - */ -template -inline bool -operator<(pa_self_relative_ptr const &lhs, std::nullptr_t) noexcept -{ - return std::less::element_type *>()( - lhs.get(), nullptr); -} - -/** - * Compare a pa_self_relative_ptr with a null pointer. - */ -template -inline bool -operator<(std::nullptr_t, pa_self_relative_ptr const &rhs) noexcept -{ - return std::less::element_type *>()( - nullptr, rhs.get()); -} - -/** - * Compare a pa_self_relative_ptr with a null pointer. - */ -template -inline bool -operator<=(pa_self_relative_ptr const &lhs, std::nullptr_t) noexcept -{ - return !(nullptr < lhs); -} - -/** - * Compare a pa_self_relative_ptr with a null pointer. - */ -template -inline bool -operator<=(std::nullptr_t, pa_self_relative_ptr const &rhs) noexcept -{ - return !(rhs < nullptr); -} - -/** - * Compare a pa_self_relative_ptr with a null pointer. - */ -template -inline bool -operator>(pa_self_relative_ptr const &lhs, std::nullptr_t) noexcept -{ - return nullptr < lhs; -} - -/** - * Compare a pa_self_relative_ptr with a null pointer. - */ -template -inline bool -operator>(std::nullptr_t, pa_self_relative_ptr const &rhs) noexcept -{ - return rhs < nullptr; -} - -/** - * Compare a pa_self_relative_ptr with a null pointer. - */ -template -inline bool -operator>=(pa_self_relative_ptr const &lhs, std::nullptr_t) noexcept -{ - return !(lhs < nullptr); -} - -/** - * Compare a pa_self_relative_ptr with a null pointer. 
- */ -template -inline bool -operator>=(std::nullptr_t, pa_self_relative_ptr const &rhs) noexcept -{ - return !(nullptr < rhs); -} - -/** - * Addition operator for self-relative pointers. - */ -template -inline pa_self_relative_ptr -operator+(pa_self_relative_ptr const &lhs, std::ptrdiff_t s) -{ - pa_self_relative_ptr ptr = lhs; - ptr += s; - return ptr; -} - -/** - * Subtraction operator for self-relative pointers. - */ -template -inline pa_self_relative_ptr -operator-(pa_self_relative_ptr const &lhs, std::ptrdiff_t s) -{ - pa_self_relative_ptr ptr = lhs; - ptr -= s; - return ptr; -} - -/** - * Subtraction operator for self-relative pointers of identical type. - * - * Calculates the offset difference. - * Calculating the difference of pointers from objects of - * different pools is not allowed. - */ -template ::type, - typename std::remove_cv::type>::value>> -inline ptrdiff_t -operator-(pa_self_relative_ptr const &lhs, - pa_self_relative_ptr const &rhs) -{ - return self_relative_ptr_base::distance_between(rhs, lhs) / - static_cast(sizeof(T)); -} - -/** - * Ostream operator - */ -template -std::ostream & -operator<<(std::ostream &os, pa_self_relative_ptr const &ptr) -{ - os << ptr.to_void_pointer(); - return os; -} - -} -} -} - -#endif // LIBPMEMOBJ_CPP_PA_SELF_RELATIVE_PTR_HPP diff --git a/include/libpmemobj++/experimental/self_relative_ptr.hpp b/include/libpmemobj++/experimental/self_relative_ptr.hpp index c1905be8ac..8ad8e65bb7 100644 --- a/include/libpmemobj++/experimental/self_relative_ptr.hpp +++ b/include/libpmemobj++/experimental/self_relative_ptr.hpp @@ -21,11 +21,11 @@ namespace obj namespace experimental { -template +template class self_relative_ptr; -template <> -class self_relative_ptr : public self_relative_ptr_base { +template +class self_relative_ptr : public self_relative_ptr_base { public: using base_type = self_relative_ptr_base; using this_type = self_relative_ptr; @@ -43,6 +43,14 @@ class self_relative_ptr : public self_relative_ptr_base { { } 
+ self_relative_ptr(element_type *ptr, bool dirty) noexcept + : self_relative_ptr_base(self_offset(ptr)) + { + intptr_t dirty_mask = dirty == true; + --dirty_mask; + offset &= (dirty_mask | dirty_flag); + } + inline element_type * get() const noexcept { @@ -77,7 +85,7 @@ class self_relative_ptr : public self_relative_ptr_base { * * @includedoc shared/self_relative_pointer_implementation.txt */ -template +template class self_relative_ptr : public self_relative_ptr_base { public: using base_type = self_relative_ptr_base; @@ -125,6 +133,11 @@ class self_relative_ptr : public self_relative_ptr_base { { } + constexpr self_relative_ptr(std::nullptr_t, bool dirty) noexcept + : self_relative_ptr_base() + { + } + /** * Volatile pointer constructor. * @@ -135,6 +148,11 @@ class self_relative_ptr : public self_relative_ptr_base { { } + self_relative_ptr(element_type *ptr, bool dirty) noexcept + : self_relative_ptr_base(self_offset(ptr, dirty)) + { + } + /** * Constructor from persistent_ptr */ @@ -143,6 +161,11 @@ class self_relative_ptr : public self_relative_ptr_base { { } + self_relative_ptr(persistent_ptr ptr, bool dirty) noexcept + : self_relative_ptr_base(self_offset(ptr.get(),dirty)) + { + } + /** * PMEMoid constructor. * @@ -156,6 +179,12 @@ class self_relative_ptr : public self_relative_ptr_base { { } + self_relative_ptr(PMEMoid oid, bool dirty) noexcept + : self_relative_ptr_base(self_offset( + static_cast(pmemobj_direct(oid)), dirty)) + { + } + /** * Copy constructor */ @@ -177,7 +206,8 @@ class self_relative_ptr : public self_relative_ptr_base { typename std::remove_cv::type>::value && !std::is_void::value, decltype(static_cast(std::declval()))>::type> - self_relative_ptr(self_relative_ptr const &r) noexcept + self_relative_ptr( + self_relative_ptr const &r) noexcept : self_relative_ptr_base(self_offset(static_cast(r.get()))) { } @@ -231,8 +261,8 @@ class self_relative_ptr : public self_relative_ptr_base { /** * Dereference operator. 
*/ - typename pmem::detail::sp_dereference::type operator*() const - noexcept + typename pmem::detail::sp_dereference::type + operator*() const noexcept { return *(this->get()); } @@ -240,8 +270,8 @@ class self_relative_ptr : public self_relative_ptr_base { /** * Member access operator. */ - typename pmem::detail::sp_member_access::type operator->() const - noexcept + typename pmem::detail::sp_member_access::type + operator->() const noexcept { return this->get(); } @@ -294,8 +324,8 @@ class self_relative_ptr : public self_relative_ptr_base { template ::value>::type> - self_relative_ptr & - operator=(self_relative_ptr const &r) + self_relative_ptr & + operator=(self_relative_ptr const &r) { this_type(r).swap(*this); return *this; @@ -317,7 +347,7 @@ class self_relative_ptr : public self_relative_ptr_base { /** * Prefix increment operator. */ - inline self_relative_ptr & + inline self_relative_ptr & operator++() { detail::conditional_add_to_tx(this); @@ -329,7 +359,7 @@ class self_relative_ptr : public self_relative_ptr_base { /** * Postfix increment operator. */ - inline self_relative_ptr + inline self_relative_ptr operator++(int) { auto copy = *this; @@ -341,7 +371,7 @@ class self_relative_ptr : public self_relative_ptr_base { /** * Prefix decrement operator. */ - inline self_relative_ptr & + inline self_relative_ptr & operator--() { detail::conditional_add_to_tx(this); @@ -353,7 +383,7 @@ class self_relative_ptr : public self_relative_ptr_base { /** * Postfix decrement operator. */ - inline self_relative_ptr + inline self_relative_ptr operator--(int) { auto copy = *this; @@ -365,7 +395,7 @@ class self_relative_ptr : public self_relative_ptr_base { /** * Addition assignment operator. */ - inline self_relative_ptr & + inline self_relative_ptr & operator+=(std::ptrdiff_t s) { detail::conditional_add_to_tx(this); @@ -376,7 +406,7 @@ class self_relative_ptr : public self_relative_ptr_base { /** * Subtraction assignment operator. 
*/ - inline self_relative_ptr & + inline self_relative_ptr & operator-=(std::ptrdiff_t s) { detail::conditional_add_to_tx(this); @@ -401,6 +431,14 @@ class self_relative_ptr : public self_relative_ptr_base { { return base_type::pointer_to_offset(static_cast(ptr)); } + difference_type + self_offset(element_type *ptr, bool dirty) const noexcept + { + intptr_t dirty_mask = dirty == true; + --dirty_mask; + return (base_type::pointer_to_offset(static_cast(ptr)) & (dirty_mask | dirty_flag)); + } + }; /** @@ -409,9 +447,10 @@ class self_relative_ptr : public self_relative_ptr_base { * Non-member swap function as required by Swappable concept. * en.cppreference.com/w/cpp/concept/Swappable */ -template +template inline void -swap(self_relative_ptr &a, self_relative_ptr &b) +swap(self_relative_ptr &a, + self_relative_ptr &b) { a.swap(b); } @@ -419,10 +458,10 @@ swap(self_relative_ptr &a, self_relative_ptr &b) /** * Equality operator. */ -template +template inline bool -operator==(self_relative_ptr const &lhs, - self_relative_ptr const &rhs) noexcept +operator==(self_relative_ptr const &lhs, + self_relative_ptr const &rhs) noexcept { return lhs.to_byte_pointer() == rhs.to_byte_pointer(); } @@ -430,10 +469,10 @@ operator==(self_relative_ptr const &lhs, /** * Inequality operator. */ -template +template inline bool -operator!=(self_relative_ptr const &lhs, - self_relative_ptr const &rhs) noexcept +operator!=(self_relative_ptr const &lhs, + self_relative_ptr const &rhs) noexcept { return !(lhs == rhs); } @@ -441,9 +480,10 @@ operator!=(self_relative_ptr const &lhs, /** * Equality operator with nullptr. */ -template +template inline bool -operator==(self_relative_ptr const &lhs, std::nullptr_t) noexcept +operator==(self_relative_ptr const &lhs, + std::nullptr_t) noexcept { return !bool(lhs); } @@ -451,9 +491,10 @@ operator==(self_relative_ptr const &lhs, std::nullptr_t) noexcept /** * Equality operator with nullptr. 
*/ -template +template inline bool -operator==(std::nullptr_t, self_relative_ptr const &lhs) noexcept +operator==(std::nullptr_t, + self_relative_ptr const &lhs) noexcept { return !bool(lhs); } @@ -461,9 +502,10 @@ operator==(std::nullptr_t, self_relative_ptr const &lhs) noexcept /** * Inequality operator with nullptr. */ -template +template inline bool -operator!=(self_relative_ptr const &lhs, std::nullptr_t) noexcept +operator!=(self_relative_ptr const &lhs, + std::nullptr_t) noexcept { return bool(lhs); } @@ -471,9 +513,10 @@ operator!=(self_relative_ptr const &lhs, std::nullptr_t) noexcept /** * Inequality operator with nullptr. */ -template +template inline bool -operator!=(std::nullptr_t, self_relative_ptr const &lhs) noexcept +operator!=(std::nullptr_t, + self_relative_ptr const &lhs) noexcept { return bool(lhs); } @@ -484,10 +527,10 @@ operator!=(std::nullptr_t, self_relative_ptr const &lhs) noexcept * @return true if the sum(this, offset) of lhs is less than the sum(this, * offset) of rhs. Returns false otherwise. */ -template +template inline bool -operator<(self_relative_ptr const &lhs, - self_relative_ptr const &rhs) noexcept +operator<(self_relative_ptr const &lhs, + self_relative_ptr const &rhs) noexcept { return lhs.to_byte_pointer() < rhs.to_byte_pointer(); } @@ -497,10 +540,10 @@ operator<(self_relative_ptr const &lhs, * * See less than operator for comparison rules. */ -template +template inline bool -operator<=(self_relative_ptr const &lhs, - self_relative_ptr const &rhs) noexcept +operator<=(self_relative_ptr const &lhs, + self_relative_ptr const &rhs) noexcept { return !(rhs < lhs); } @@ -510,10 +553,10 @@ operator<=(self_relative_ptr const &lhs, * * See less than operator for comparison rules. 
*/ -template +template inline bool -operator>(self_relative_ptr const &lhs, - self_relative_ptr const &rhs) noexcept +operator>(self_relative_ptr const &lhs, + self_relative_ptr const &rhs) noexcept { return (rhs < lhs); } @@ -523,10 +566,10 @@ operator>(self_relative_ptr const &lhs, * * See less than operator for comparison rules. */ -template +template inline bool -operator>=(self_relative_ptr const &lhs, - self_relative_ptr const &rhs) noexcept +operator>=(self_relative_ptr const &lhs, + self_relative_ptr const &rhs) noexcept { return !(lhs < rhs); } @@ -536,31 +579,34 @@ operator>=(self_relative_ptr const &lhs, /** * Compare a self_relative_ptr with a null pointer. */ -template +template inline bool -operator<(self_relative_ptr const &lhs, std::nullptr_t) noexcept +operator<(self_relative_ptr const &lhs, + std::nullptr_t) noexcept { - return std::less::element_type *>()( - lhs.get(), nullptr); + return std::less::element_type *>()(lhs.get(), nullptr); } /** * Compare a self_relative_ptr with a null pointer. */ -template +template inline bool -operator<(std::nullptr_t, self_relative_ptr const &rhs) noexcept +operator<(std::nullptr_t, + self_relative_ptr const &rhs) noexcept { - return std::less::element_type *>()( - nullptr, rhs.get()); + return std::less::element_type *>()(nullptr, rhs.get()); } /** * Compare a self_relative_ptr with a null pointer. */ -template +template inline bool -operator<=(self_relative_ptr const &lhs, std::nullptr_t) noexcept +operator<=(self_relative_ptr const &lhs, + std::nullptr_t) noexcept { return !(nullptr < lhs); } @@ -568,9 +614,10 @@ operator<=(self_relative_ptr const &lhs, std::nullptr_t) noexcept /** * Compare a self_relative_ptr with a null pointer. 
*/ -template +template inline bool -operator<=(std::nullptr_t, self_relative_ptr const &rhs) noexcept +operator<=(std::nullptr_t, + self_relative_ptr const &rhs) noexcept { return !(rhs < nullptr); } @@ -578,9 +625,10 @@ operator<=(std::nullptr_t, self_relative_ptr const &rhs) noexcept /** * Compare a self_relative_ptr with a null pointer. */ -template +template inline bool -operator>(self_relative_ptr const &lhs, std::nullptr_t) noexcept +operator>(self_relative_ptr const &lhs, + std::nullptr_t) noexcept { return nullptr < lhs; } @@ -588,9 +636,10 @@ operator>(self_relative_ptr const &lhs, std::nullptr_t) noexcept /** * Compare a self_relative_ptr with a null pointer. */ -template +template inline bool -operator>(std::nullptr_t, self_relative_ptr const &rhs) noexcept +operator>(std::nullptr_t, + self_relative_ptr const &rhs) noexcept { return rhs < nullptr; } @@ -598,9 +647,10 @@ operator>(std::nullptr_t, self_relative_ptr const &rhs) noexcept /** * Compare a self_relative_ptr with a null pointer. */ -template +template inline bool -operator>=(self_relative_ptr const &lhs, std::nullptr_t) noexcept +operator>=(self_relative_ptr const &lhs, + std::nullptr_t) noexcept { return !(lhs < nullptr); } @@ -608,9 +658,10 @@ operator>=(self_relative_ptr const &lhs, std::nullptr_t) noexcept /** * Compare a self_relative_ptr with a null pointer. */ -template +template inline bool -operator>=(std::nullptr_t, self_relative_ptr const &rhs) noexcept +operator>=(std::nullptr_t, + self_relative_ptr const &rhs) noexcept { return !(nullptr < rhs); } @@ -618,11 +669,11 @@ operator>=(std::nullptr_t, self_relative_ptr const &rhs) noexcept /** * Addition operator for self-relative pointers. 
*/ -template -inline self_relative_ptr -operator+(self_relative_ptr const &lhs, std::ptrdiff_t s) +template +inline self_relative_ptr +operator+(self_relative_ptr const &lhs, std::ptrdiff_t s) { - self_relative_ptr ptr = lhs; + self_relative_ptr ptr = lhs; ptr += s; return ptr; } @@ -630,11 +681,11 @@ operator+(self_relative_ptr const &lhs, std::ptrdiff_t s) /** * Subtraction operator for self-relative pointers. */ -template -inline self_relative_ptr -operator-(self_relative_ptr const &lhs, std::ptrdiff_t s) +template +inline self_relative_ptr +operator-(self_relative_ptr const &lhs, std::ptrdiff_t s) { - self_relative_ptr ptr = lhs; + self_relative_ptr ptr = lhs; ptr -= s; return ptr; } @@ -649,9 +700,11 @@ operator-(self_relative_ptr const &lhs, std::ptrdiff_t s) template ::type, - typename std::remove_cv::type>::value>> + typename std::remove_cv::type>::value>, + typename PersistentAware> inline ptrdiff_t -operator-(self_relative_ptr const &lhs, self_relative_ptr const &rhs) +operator-(self_relative_ptr const &lhs, + self_relative_ptr const &rhs) { return self_relative_ptr_base::distance_between(rhs, lhs) / static_cast(sizeof(T)); @@ -660,9 +713,9 @@ operator-(self_relative_ptr const &lhs, self_relative_ptr const &rhs) /** * Ostream operator */ -template +template std::ostream & -operator<<(std::ostream &os, self_relative_ptr const &ptr) +operator<<(std::ostream &os, self_relative_ptr const &ptr) { os << ptr.to_void_pointer(); return os; diff --git a/include/libpmemobj++/experimental/swmr_map.hpp b/include/libpmemobj++/experimental/swmr_map.hpp index 519187bdf1..c4212bab03 100644 --- a/include/libpmemobj++/experimental/swmr_map.hpp +++ b/include/libpmemobj++/experimental/swmr_map.hpp @@ -6,7 +6,7 @@ #include #include -#include +#include namespace pmem { @@ -20,13 +20,13 @@ namespace experimental template , typename Allocator = pmem::obj::allocator>> -class swmr_map : public detail::swmr_skip_list> { + Allocator, false, 64, std::true_type>> { using traits_type = 
detail::map_traits; - using base_type = pmem::detail::swmr_skip_list; + Allocator, false, 64, std::true_type>; + using base_type = pmem::detail::concurrent_skip_list; public: using key_type = typename base_type::key_type; diff --git a/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp b/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp deleted file mode 100644 index b2ab866875..0000000000 --- a/include/libpmemobj++/experimental/swmr_skip_list_impl.hpp +++ /dev/null @@ -1,3165 +0,0 @@ -// SPDX-License-Identifier: BSD-3-Clause -/* Copyright 2021, 4Paradigm Inc. */ - -#ifndef PMEMOBJ_SWMR_SKIP_LIST_IMPL_HPP -#define PMEMOBJ_SWMR_SKIP_LIST_IMPL_HPP - -#include -#include -#include -#include -#include -#include /* for std::unique_lock */ -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -namespace pmem -{ -namespace detail -{ - -template > -class swmr_skip_list_node { -public: - using value_type = Value; - using size_type = std::size_t; - using reference = value_type &; - using const_reference = const value_type &; - using pointer = value_type *; - using const_pointer = const value_type *; - using node_pointer = - obj::experimental::pa_self_relative_ptr; - using atomic_node_pointer = std::atomic; - using mutex_type = Mutex; - using lock_type = LockType; - - swmr_skip_list_node(size_type levels) : height_(levels) - { - for (size_type lev = 0; lev < height_; ++lev) - detail::create(&get_next(lev), - nullptr); - - assert(height() == levels); -#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED - /* - * Valgrind does not understand atomic semantic and reports - * false-postives in drd and helgrind tools. 
- */ - for (size_type lev = 0; lev < height_; ++lev) { - VALGRIND_HG_DISABLE_CHECKING(&get_next(lev), - sizeof(get_next(lev))); - } -#endif - } - - swmr_skip_list_node(size_type levels, const node_pointer *new_nexts) - : height_(levels) - { - for (size_type lev = 0; lev < height_; ++lev) - detail::create(&get_next(lev), - new_nexts[lev]); - - assert(height() == levels); -#if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED - /* - * Valgrind does not understand atomic semantic and reports - * false-postives in drd and helgrind tools. - */ - for (size_type lev = 0; lev < height_; ++lev) { - VALGRIND_HG_DISABLE_CHECKING(&get_next(lev), - sizeof(get_next(lev))); - } -#endif - } - - ~swmr_skip_list_node() - { - for (size_type lev = 0; lev < height_; ++lev) - detail::destroy(get_next(lev)); - } - - swmr_skip_list_node(const swmr_skip_list_node &) = delete; - - swmr_skip_list_node &operator=(const swmr_skip_list_node &) = delete; - - pointer - get() noexcept - { - return &val; - } - - const_pointer - get() const noexcept - { - return &val; - } - - reference - value() - { - return *get(); - } - - /* return the node_pointer of the next node on specific level. - * if it is marked flush_needed(), use CAS to clear the flag and - * perform the flush before return. 
- * */ - node_pointer - next(size_type level) - { - assert(level < height()); - node_pointer current = - get_next(level).load(std::memory_order_acquire); - if (!current.flush_needed()) { - return current; - } - obj::pool_base pop = get_pool_base(); - node_pointer desired; - while (true) { - desired = node_pointer{current.get(), false}; - if (get_next(level).compare_exchange_weak(current, - desired)) { - auto &node = get_next(level); - pop.persist(&node, sizeof(node)); - break; - } - if (!current.flush_needed()) { - return current; - } - } - return desired; - } - - node_pointer - next(size_type level) const - { - assert(level < height()); - return get_next(level).load(std::memory_order_acquire); - } - - /** - * Can`t be called concurrently - * Should be called inside a transaction - */ - - void - set_next_tx(size_type level, node_pointer next) - { - assert(level < height()); - assert(pmemobj_tx_stage() == TX_STAGE_WORK); - auto &node = get_next(level); - obj::flat_transaction::snapshot(&node); - node.store(next, std::memory_order_release); - } - - void - set_next(size_type level, node_pointer next) - { - assert(level < height()); - auto &node = get_next(level); - node.store(node_pointer{next.get(), true}, - std::memory_order_release); - /* pop.persist(&node, sizeof(node)); */ - /* instead of persist it immediately, mark it as flush_needed, - * and rely on consequent get_next operation to flush. 
*/ - } - - void - set_nexts(const node_pointer *new_nexts, size_type h) - { - assert(h == height()); - auto *nexts = get_nexts(); - - for (size_type i = 0; i < h; i++) { - nexts[i].store(node_pointer{new_nexts[i].get(), true}, - std::memory_order_relaxed); - } - } - - void - set_nexts(obj::pool_base pop, const node_pointer *new_nexts, - size_type h) - { - set_nexts(new_nexts, h); - - auto *nexts = get_nexts(); - pop.persist(nexts, sizeof(nexts[0]) * h); - } - - /** @return number of layers */ - size_type - height() const - { - return height_; - } - - lock_type - acquire() - { - return lock_type(mutex); - } - -private: - atomic_node_pointer * - get_nexts() - { - return reinterpret_cast(this + 1); - } - - atomic_node_pointer & - get_next(size_type level) - { - auto *arr = get_nexts(); - return arr[level]; - } - - const atomic_node_pointer & - get_next(size_type level) const - { - auto *arr = - reinterpret_cast(this + 1); - return arr[level]; - } - obj::pool_base - get_pool_base() const - { - PMEMobjpool *pop = pmemobj_pool_by_ptr(this); - return obj::pool_base(pop); - } - - mutex_type mutex; - union { - value_type val; - }; - size_type height_; -}; - -template -class swmr_skip_list_iterator { - using node_type = NodeType; - using node_ptr = typename std::conditional::type; - friend class swmr_skip_list_iterator; - -public: - using value_type = typename node_type::value_type; - using iterator_category = std::forward_iterator_tag; - using difference_type = std::ptrdiff_t; - using reference = - typename std::conditional::type; - using pointer = typename std::conditional::type; - - swmr_skip_list_iterator() : node(nullptr) - { - } - - /** Copy constructor. 
*/ - swmr_skip_list_iterator(const swmr_skip_list_iterator &other) - : node(other.node) - { - } - - /** Copy constructor for const iterator from non-const iterator */ - template ::type> - swmr_skip_list_iterator( - const swmr_skip_list_iterator &other) - : node(other.node) - { - } - - reference operator*() const - { - return *(node->get()); - } - - pointer operator->() const - { - return node->get(); - } - - swmr_skip_list_iterator & - operator++() - { - assert(node != nullptr); - node = node->next(0).get(); - return *this; - } - - swmr_skip_list_iterator - operator++(int) - { - swmr_skip_list_iterator tmp = *this; - ++*this; - return tmp; - } - - swmr_skip_list_iterator & - operator=(const swmr_skip_list_iterator &other) - { - node = other.node; - return *this; - } - -private: - explicit swmr_skip_list_iterator(node_type *n) : node(n) - { - } - - template ::type> - explicit swmr_skip_list_iterator(const node_type *n) : node(n) - { - } - - node_ptr node; - - template - friend class swmr_skip_list; - - template - friend bool operator==(const swmr_skip_list_iterator &lhs, - const swmr_skip_list_iterator &rhs); - - template - friend bool operator!=(const swmr_skip_list_iterator &lhs, - const swmr_skip_list_iterator &rhs); -}; - -template -bool -operator==(const swmr_skip_list_iterator &lhs, - const swmr_skip_list_iterator &rhs) -{ - return lhs.node == rhs.node; -} - -template -bool -operator!=(const swmr_skip_list_iterator &lhs, - const swmr_skip_list_iterator &rhs) -{ - return lhs.node != rhs.node; -} - -/** - * Persistent memory aware implementation of the concurrent skip list. The - * implementation is based on the lock-based concurrent skip list algorithm - * described in - * https://www.cs.tau.ac.il/~shanir/nir-pubs-web/Papers/OPODIS2006-BA.pdf. - * - * Our concurrent skip list implementation supports concurrent insertion and - * traversal, but not concurrent erasure. The erase method is prefixed with - * unsafe_, to indicate that there is no concurrency safety. 
- * - * Each time, the pool with swmr_skip_list is being opened, the - * swmr_skip_list requires runtime_initialize() to be called in order to - * restore the state after process restart. - * - * Traits template parameter allows to specify properties of the - * concurrent_ski_list. The Traits type should has the following member types: - * * key_type - type of the key - * * mapped_type - type of the mapped_value - * * value_type - type of the value stored inside the skip list node (e.g. - * pair). - * * compare_type - The comparison functor used to sort elements in the skip - * list. - * * allocator_type - The type of allocator used by the skip list. - * * max_level - The constant value which specify the number of layers in the - * skip list. - * * random_generator_type - The type of random generator used by the skip list. - * It should be thread-safe. - */ -template -class swmr_skip_list { -protected: - using traits_type = Traits; - using key_type = typename traits_type::key_type; - using mapped_type = typename traits_type::mapped_type; - using value_type = typename traits_type::value_type; - using size_type = std::size_t; - using difference_type = std::ptrdiff_t; - using key_compare = typename traits_type::compare_type; - using allocator_type = typename traits_type::allocator_type; - using allocator_traits_type = std::allocator_traits; - - using reference = value_type &; - using const_reference = const value_type &; - using pointer = typename allocator_traits_type::pointer; - using const_pointer = typename allocator_traits_type::const_pointer; - - using list_node_type = swmr_skip_list_node; - - using iterator = swmr_skip_list_iterator; - using const_iterator = swmr_skip_list_iterator; - - static constexpr size_type MAX_LEVEL = traits_type::max_level; - - using random_level_generator_type = geometric_level_generator< - typename traits_type::random_generator_type, MAX_LEVEL>; - using node_allocator_type = typename std::allocator_traits< - allocator_type>::template 
rebind_alloc; - using node_allocator_traits = typename std::allocator_traits< - allocator_type>::template rebind_traits; - using node_ptr = list_node_type *; - using const_node_ptr = const list_node_type *; - using persistent_node_ptr = - obj::experimental::pa_self_relative_ptr; - - using prev_array_type = std::array; - using next_array_type = std::array; - using node_lock_type = typename list_node_type::lock_type; - using lock_array = std::array; - -public: - static constexpr bool allow_multimapping = - traits_type::allow_multimapping; - - /** - * Default constructor. Construct empty skip list. - * - * @pre must be called in transaction scope. - * @throw pmem::pool_error if an object is not in persistent memory. - * @throw pmem::transaction_scope_error if constructor wasn't called in - * transaction. - */ - swmr_skip_list() - { - check_tx_stage_work(); - init(); - } - - /** - * Constructs an empty container. - * - * @param[in] comp comparison function object to use for all comparisons - * of keys. - * @param[in] alloc allocator to use for all memory allocations of this - * container. - * - * @pre must be called in transaction scope. - * - * @throw pmem::pool_error if an object is not in persistent memory. - * @throw pmem::transaction_scope_error if constructor wasn't called in - * transaction. - * @throw pmem::transaction_alloc_error when allocating memory for - * inserted elements in transaction failed. - */ - explicit swmr_skip_list(const key_compare &comp, - const allocator_type &alloc = allocator_type()) - : _node_allocator(alloc), _compare(comp) - { - check_tx_stage_work(); - init(); - } - - /** - * Constructs the container with the contents of the range [first, - * last). If multiple elements in the range have keys that compare - * equivalent, the first element is inserted. - * - * @param[in] first first iterator of inserted range. - * @param[in] last last iterator of inserted range. 
- * @param[in] comp comparison function object to use for all comparisons - * of keys. - * @param[in] alloc allocator to use for all memory allocations of this - * container. - * - * InputIt must meet the requirements of LegacyInputIterator. - * - * @pre must be called in transaction scope. - * - * @throw pmem::pool_error if an object is not in persistent memory. - * @throw pmem::transaction_scope_error if constructor wasn't called in - * transaction. - * @throw pmem::transaction_alloc_error when allocating memory for - * inserted elements in transaction failed. - * @throw rethrows element constructor exception. - */ - template - swmr_skip_list(InputIt first, InputIt last, - const key_compare &comp = key_compare(), - const allocator_type &alloc = allocator_type()) - : _node_allocator(alloc), _compare(comp) - { - check_tx_stage_work(); - init(); - while (first != last) - internal_unsafe_emplace(*first++); - } - - /** - * Copy constructor. Constructs the container with the copy of the - * contents of other. - * - * @param[in] other reference to the swmr_skip_list to be copied. - * - * @pre must be called in transaction scope. - * - * @post size() == other.size() - * - * @throw pmem::pool_error if an object is not in persistent memory. - * @throw pmem::transaction_alloc_error when allocating memory for - * copied elements in transaction failed. - * @throw pmem::transaction_scope_error if constructor wasn't called in - * transaction. - * @throw rethrows element constructor exception. - */ - swmr_skip_list(const swmr_skip_list &other) - : _node_allocator(node_allocator_traits:: - select_on_container_copy_construction( - other._node_allocator)), - _compare(other._compare), - _rnd_generator(other._rnd_generator) - { - check_tx_stage_work(); - init(); - internal_copy(other); - assert(_size == other._size); - } - - /** - * Copy constructor. Constructs the container with the copy of the - * contents of other. 
- * - * @param[in] other reference to the swmr_skip_list to be copied. - * @param[in] alloc allocator to use for all memory allocations of this - * container. - * - * @pre must be called in transaction scope. - * - * @post size() == other.size() - * - * @throw pmem::pool_error if an object is not in persistent memory. - * @throw pmem::transaction_alloc_error when allocating memory for - * copied elements in transaction failed. - * @throw pmem::transaction_scope_error if constructor wasn't called in - * transaction. - * @throw rethrows element constructor exception. - */ - swmr_skip_list(const swmr_skip_list &other, const allocator_type &alloc) - : _node_allocator(alloc), - _compare(other._compare), - _rnd_generator(other._rnd_generator) - { - check_tx_stage_work(); - init(); - internal_copy(other); - assert(_size == other._size); - } - - /** - * Move constructor. Constructs the container with the contents of other - * using move semantics. Allocator is obtained by move-construction from - * the allocator belonging to other - * - * @param[in] other reference to the swmr_skip_list to be copied. - * - * @pre must be called in transaction scope. - * - * @post size() == other.size() - * - * @throw pmem::pool_error if an object is not in persistent memory. - * @throw pmem::transaction_alloc_error when allocating memory for - * copied elements in transaction failed. - * @throw pmem::transaction_scope_error if constructor wasn't called in - * transaction. - * @throw rethrows element constructor exception. - */ - swmr_skip_list(swmr_skip_list &&other) - : _node_allocator(std::move(other._node_allocator)), - _compare(other._compare), - _rnd_generator(other._rnd_generator) - { - check_tx_stage_work(); - init(); - internal_move(std::move(other)); - } - - /** - * Move constructor. Constructs the container with the contents of other - * using move semantics. - * - * @param[in] other reference to the swmr_skip_list to be copied. 
- * @param[in] alloc allocator to use for all memory allocations of this - * container. - * - * @pre must be called in transaction scope. - * - * @post size() == other.size() - * - * @throw pmem::pool_error if an object is not in persistent memory. - * @throw pmem::transaction_alloc_error when allocating memory for - * copied elements in transaction failed. - * @throw pmem::transaction_scope_error if constructor wasn't called in - * transaction. - * @throw rethrows element constructor exception. - */ - swmr_skip_list(swmr_skip_list &&other, const allocator_type &alloc) - : _node_allocator(alloc), - _compare(other._compare), - _rnd_generator(other._rnd_generator) - { - check_tx_stage_work(); - init(); - if (alloc == other.get_allocator()) { - internal_move(std::move(other)); - } else { - init(); - internal_copy(std::make_move_iterator(other.begin()), - std::make_move_iterator(other.end())); - } - } - - /** - * Initialize swmr_skip_list after process restart. - * MUST be called every time after process restart. - * Not thread safe. - * - */ - void - runtime_initialize() - { - tls_restore(); - - assert(this->size() == - size_type(std::distance(this->begin(), this->end()))); - } - - /** - * Should be called before swmr_skip_list destructor is called. - * Otherwise, program can terminate if an exception occurs while freeing - * memory inside dtor. - * - * The skip list map can NOT be used after free_data() was called - * (unless it was called in a transaction and that transaction aborted). - * - * @throw std::transaction_error in case of PMDK transaction failure - * @throw pmem::transaction_free_error when freeing underlying memory - * failed. - */ - void - free_data() - { - if (dummy_head == nullptr) - return; - - auto pop = get_pool_base(); - obj::flat_transaction::run(pop, [&] { - clear(); - delete_dummy_head(); - }); - } - - /** - * Destructor. - * free_data should be called before swmr_skip_list - * destructor is called. 
Otherwise, program can terminate if - * an exception occurs while freeing memory inside dtor. - * - * The skip list map can NOT be used after free_data() was called - * (unless it was called in a transaction and that transaction aborted). - */ - ~swmr_skip_list() - { - try { - free_data(); - } catch (...) { - std::terminate(); - } - } - - /** - * Copy assignment operator. Replaces the contents with a copy of the - * contents of other transactionally. If - * std::allocator_traits::propagate_on_container_copy_assignment::value - * is true, the target allocator is replaced by a copy of the source - * allocator. - * - * @post size() == other.size() - * - * @throw pmem::transaction_alloc_error when allocating new memory - * failed. - * @throw pmem::transaction_free_error when freeing old existing - * elements failed. - * @throw rethrows constructor exception. - */ - swmr_skip_list & - operator=(const swmr_skip_list &other) - { - if (this == &other) - return *this; - - obj::pool_base pop = get_pool_base(); - obj::flat_transaction::run(pop, [&] { - using pocca_t = typename node_allocator_traits:: - propagate_on_container_copy_assignment; - clear(); - allocator_copy_assignment(_node_allocator, - other._node_allocator, - pocca_t()); - _compare = other._compare; - _rnd_generator = other._rnd_generator; - - internal_copy(other); - }); - return *this; - } - - /** - * Move assignment operator. Replaces the contents with those of other - * using move semantics (i.e. the data in other is moved from other into - * this container). other is in a valid but unspecified state - * afterwards. If - * std::allocator_traits::propagate_on_container_move_assignment::value - * is true, the target allocator is replaced by a copy of the source - * allocator. 
If it is false and the source and the target allocators do - * not compare equal, the target cannot take ownership of the source - * memory and must move-assign each element individually, allocating - * additional memory using its own allocator as needed. In any case, all - * elements originally present in *this are either destroyed or replaced - * by elementwise move-assignment. - * - * @throw pmem::transaction_alloc_error when allocating new memory - * failed. - * @throw pmem::transaction_free_error when freeing old existing - * elements failed. - * @throw rethrows constructor exception. - */ - swmr_skip_list & - operator=(swmr_skip_list &&other) - { - if (this == &other) - return *this; - - obj::pool_base pop = get_pool_base(); - obj::flat_transaction::run(pop, [&] { - using pocma_t = typename node_allocator_traits:: - propagate_on_container_move_assignment; - clear(); - if (pocma_t::value || - _node_allocator == other._node_allocator) { - delete_dummy_head(); - allocator_move_assignment(_node_allocator, - other._node_allocator, - pocma_t()); - _compare = other._compare; - _rnd_generator = other._rnd_generator; - internal_move(std::move(other)); - } else { - internal_copy( - std::make_move_iterator(other.begin()), - std::make_move_iterator(other.end())); - } - }); - return *this; - } - - /** - * Replaces the contents with those identified by initializer list il. - * - * @param[in] il initializer list to use as data source - * - * @throw pmem::transaction_alloc_error when allocating new memory - * failed. - * @throw pmem::transaction_free_error when freeing old existing - * elements failed. - * @throw rethrows constructor exception. - */ - swmr_skip_list & - operator=(std::initializer_list il) - { - obj::pool_base pop = get_pool_base(); - obj::flat_transaction::run(pop, [&] { - clear(); - for (auto it = il.begin(); it != il.end(); ++it) - internal_unsafe_emplace(*it); - }); - return *this; - } - - /** - * Inserts value in a thread-safe way. 
No iterators or references are - * invalidated. - * - * @param[in] value element value to insert. - * - * @return a pair consisting of an iterator to the inserted element (or - * to the element that prevented the insertion) and a bool denoting - * whether the insertion took place. - * - * @throw pmem::transaction_error when snapshotting failed. - * @throw pmem::transaction_alloc_error when allocating new memory - * failed. - * @throw pmem::transaction_scope_error if called inside transaction. - * @throw rethrows constructor exception. - */ - std::pair - insert(const value_type &value) - { - return internal_insert(value.first, value); - } - - /** - * Inserts value. No iterators or references are invalidated. - * This overload is equivalent to emplace(std::forward

(value)) and - * only participates in overload resolution if - * std::is_constructible::value == true. - * - * @param[in] value element value to insert. - * - * @return a pair consisting of an iterator to the inserted element (or - * to the element that prevented the insertion) and a bool denoting - * whether the insertion took place. - * - * @throw pmem::transaction_error when snapshotting failed. - * @throw pmem::transaction_alloc_error when allocating new memory - * failed. - * @throw pmem::transaction_scope_error if called inside transaction. - * @throw rethrows constructor exception. - */ - template ::value>::type> - std::pair - insert(P &&value) - { - return emplace(std::forward

(value)); - } - - /** - * Inserts value using move semantic. No iterators or references are - * invalidated. - * - * @param[in] value element value to insert. - * - * @return a pair consisting of an iterator to the inserted element (or - * to the element that prevented the insertion) and a bool denoting - * whether the insertion took place. - * - * @throw pmem::transaction_error when snapshotting failed. - * @throw pmem::transaction_alloc_error when allocating new memory - * failed. - * @throw pmem::transaction_scope_error if called inside transaction. - * @throw rethrows constructor exception. - */ - std::pair - insert(value_type &&value) - { - return internal_insert(value.first, std::move(value)); - } - - /** - * Inserts value in the position as close as possible, just prior to - * hint. No iterators or references are invalidated. - * - * @param[in] hint iterator to the position before which the new element - * will be inserted. - * @param[in] value element value to insert. - * - * @return an iterator to the inserted element, or to the element that - * prevented the insertion. - * - * @throw pmem::transaction_error when snapshotting failed. - * @throw pmem::transaction_alloc_error when allocating new memory - * failed. - * @throw pmem::transaction_scope_error if called inside transaction. - * @throw rethrows constructor exception. - */ - iterator - insert(const_iterator hint, const_reference value) - { - /* Ignore hint */ - return insert(value).first; - } - - /** - * Inserts value in the position as close as possible, just prior to - * hint. No iterators or references are invalidated. - * This overload is equivalent to emplace_hint(hint, - * std::forward

(value)) and only participates in overload resolution - * if std::is_constructible::value == true. - * - * @param[in] hint iterator to the position before which the new element - * will be inserted. - * @param[in] value element value to insert. - * - * @return an iterator to the inserted element, or to the element that - * prevented the insertion. - * - * @throw pmem::transaction_error when snapshotting failed. - * @throw pmem::transaction_alloc_error when allocating new memory - * failed. - * @throw pmem::transaction_scope_error if called inside transaction. - * @throw rethrows constructor exception. - */ - template ::value>::type> - iterator - insert(const_iterator hint, P &&value) - { - return emplace_hint(hint, std::forward

(value)); - } - - /** - * Inserts elements from range [first, last). If multiple elements in - * the range have keys that compare equivalent, the first one is - * inserted. - * - * @param[in] first first iterator of inserted range. - * @param[in] last last iterator of inserted range. - * - * @throw pmem::transaction_error when snapshotting failed. - * @throw pmem::transaction_alloc_error when allocating new memory - * failed. - * @throw pmem::transaction_scope_error if called inside transaction. - * @throw rethrows constructor exception. - */ - template - void - insert(InputIterator first, InputIterator last) - { - for (InputIterator it = first; it != last; ++it) - insert(*it); - } - - /** - * Inserts elements from initializer list ilist. If multiple elements in - * the range have keys that compare equivalent, the first one is - * inserted. - * - * @param[in] ilist first initializer list to insert the values from. - * - * @throw pmem::transaction_error when snapshotting failed. - * @throw pmem::transaction_alloc_error when allocating new memory - * failed. - * @throw pmem::transaction_scope_error if called inside transaction. - * @throw rethrows constructor exception. - */ - void - insert(std::initializer_list ilist) - { - insert(ilist.begin(), ilist.end()); - } - - /** - * Inserts a new element into the container constructed in-place with - * the given args if there is no element with the key in the container. - * - * Careful use of emplace allows the new element to be constructed while - * avoiding unnecessary copy or move operations. The constructor of the - * new element (i.e. std::pair) is called with exactly the - * same arguments as supplied to emplace, forwarded via - * std::forward(args).... The element may be constructed even if - * there already is an element with the key in the container, in which - * case the newly constructed element will be destroyed immediately. - * - * No iterators or references are invalidated. 
- * - * @param[in] args arguments to forward to the constructor of the - * element - * - * @return a pair consisting of an iterator to the inserted element (or - * to the element that prevented the insertion) and a bool denoting - * whether the insertion took place. - * - * @throw pmem::transaction_error when snapshotting failed. - * @throw pmem::transaction_alloc_error when allocating new memory - * failed. - * @throw pmem::transaction_scope_error if called inside transaction. - * @throw rethrows constructor exception. - */ - template - std::pair - emplace(Args &&... args) - { - return internal_emplace(std::forward(args)...); - } - - /** - * Inserts a new element to the container as close as possible to the - * position just before hint. The element is constructed in-place, i.e. - * no copy or move operations are performed. - * - * The constructor of the element type (value_type, that is, - * std::pair) is called with exactly the same arguments as - * supplied to the function, forwarded with std::forward(args)... - * - * No iterators or references are invalidated. - * - * @param[in] hint iterator to the position before which the new element - * will be inserted. - * @param[in] args arguments to forward to the constructor of the - * element. - * - * @return Returns an iterator to the newly inserted element. - * - * If the insertion failed because the element already exists, returns - * an iterator to the already existing element with the equivalent key. - * - * @return an iterator to the inserted element, or to the element that - * prevented the insertion. - * - * @throw pmem::transaction_error when snapshotting failed. - * @throw pmem::transaction_alloc_error when allocating new memory - * failed. - * @throw pmem::transaction_scope_error if called inside transaction. - * @throw rethrows constructor exception. - */ - template - iterator - emplace_hint(const_iterator hint, Args &&... 
args) - { - /* Ignore hint */ - return emplace(std::forward(args)...).first; - } - - /** - * If a key equivalent to k already exists in the container, does - * nothing. Otherwise, behaves like emplace except that the element is - * constructed as value_type(std::piecewise_construct, - * std::forward_as_tuple(k), - * std::forward_as_tuple(std::forward(args)...)) - * - * No iterators or references are invalidated. - * - * @param[in] k the key used both to look up and to insert if not found. - * @param[in] args arguments to forward to the constructor of the - * element. - * - * @return a pair consisting of an iterator to the inserted element (or - * to the element that prevented the insertion) and a bool denoting - * whether the insertion took place. - * - * @throw pmem::transaction_error when snapshotting failed. - * @throw pmem::transaction_alloc_error when allocating new memory - * failed. - * @throw pmem::transaction_scope_error if called inside transaction. - * @throw rethrows constructor exception. - */ - template - std::pair - try_emplace(const key_type &k, Args &&... args) - { - return internal_try_emplace(k, std::forward(args)...); - } - - /** - * If a key equivalent to k already exists in the container, does - * nothing. Otherwise, behaves like emplace except that the element is - * constructed as value_type(std::piecewise_construct, - * std::forward_as_tuple(std::move(k)), - * std::forward_as_tuple(std::forward(args)...)). - * - * No iterators or references are invalidated. - * - * @param[in] k the key used both to look up and to insert if not found. - * @param[in] args arguments to forward to the constructor of the - * element. - * - * @return a pair consisting of an iterator to the inserted element (or - * to the element that prevented the insertion) and a bool denoting - * whether the insertion took place. - * - * @throw pmem::transaction_error when snapshotting failed. - * @throw pmem::transaction_alloc_error when allocating new memory - * failed. 
- * @throw pmem::transaction_scope_error if called inside transaction. - * @throw rethrows constructor exception. - */ - template - std::pair - try_emplace(key_type &&k, Args &&... args) - { - return internal_try_emplace(std::move(k), - std::forward(args)...); - } - - /** - * If a key equivalent to k already exists in the container, does - * nothing. Otherwise, behaves like emplace except that the element is - * constructed as value_type(std::piecewise_construct, - * std::forward_as_tuple(std::move(k)), - * std::forward_as_tuple(std::forward(args)...)). - * This overload only participates in overload resolution if the - * qualified-id Compare::is_transparent is valid and denotes a type and - * std::is_constructible::value == true . It allows calling - * this function without constructing an instance of Key. - * - * No iterators or references are invalidated. - * - * @param[in] k the key used both to look up and to insert if not found. - * @param[in] args arguments to forward to the constructor of the - * element. - * - * @return a pair consisting of an iterator to the inserted element (or - * to the element that prevented the insertion) and a bool denoting - * whether the insertion took place. - * - * @throw pmem::transaction_error when snapshotting failed. - * @throw pmem::transaction_alloc_error when allocating new memory - * failed. - * @throw rethrows constructor exception. - */ - template - typename std::enable_if< - has_is_transparent::value && - std::is_constructible::value, - std::pair>::type - try_emplace(K &&k, Args &&... args) - { - return internal_try_emplace(std::forward(k), - std::forward(args)...); - } - - /** - * Removes the element at pos from the container. - * References and iterators to the erased elements are invalidated. - * Other references and iterators are not affected. - * - * @pre The iterator pos must be valid and dereferenceable. 
Thus the - * end() iterator (which is valid, but is not dereferenceable) cannot be - * used as a value for pos. - * - * @param[in] pos iterator to the element to remove. - * - * @return iterator following the removed element. - * - * @throw pmem::transaction_error when snapshotting failed. - * @throw rethrows destructor exception. - */ - iterator - unsafe_erase(iterator pos) - { - check_outside_tx(); - auto &size_diff = tls_data.local().size_diff; - return internal_erase(pos, size_diff); - } - - /** - * Removes the element at pos from the container. - * References and iterators to the erased elements are invalidated. - * Other references and iterators are not affected. - * - * @pre The iterator pos must be valid and dereferenceable. Thus the - * end() iterator (which is valid, but is not dereferenceable) cannot be - * used as a value for pos. - * - * @param[in] pos iterator to the element to remove. - * - * @return iterator following the removed element. - * - * @throw pmem::transaction_error when snapshotting failed. - * @throw rethrows destructor exception. - */ - iterator - unsafe_erase(const_iterator pos) - { - return unsafe_erase(get_iterator(pos)); - } - - /** - * Removes the elements in the range [first; last), which must be a - * valid range in *this. - * References and iterators to the erased elements are invalidated. - * Other references and iterators are not affected. - * - * @param[in] first first iterator in the range of elements to remove. - * @param[in] last last iterator in the range of elements to remove. - * - * @return iterator following the last removed element. - * - * @throw pmem::transaction_error when snapshotting failed. - * @throw rethrows destructor exception. 
- */ - iterator - unsafe_erase(const_iterator first, const_iterator last) - { - check_outside_tx(); - obj::pool_base pop = get_pool_base(); - auto &size_diff = tls_data.local().size_diff; - - obj::flat_transaction::run(pop, [&] { - while (first != last) { - first = internal_erase(first, size_diff); - } - }); - - return get_iterator(first); - } - - /** - * Removes the element (if one exists) with the key equivalent to key. - * References and iterators to the erased elements are invalidated. - * Other references and iterators are not affected. - * - * @param[in] key key value of the elements to remove. - * - * @return Number of elements removed. - * - * @throw pmem::transaction_error when snapshotting failed. - * @throw rethrows destructor exception. - */ - size_type - unsafe_erase(const key_type &key) - { - std::pair range = equal_range(key); - size_type sz = static_cast( - std::distance(range.first, range.second)); - unsafe_erase(range.first, range.second); - return sz; - } - - /** - * Removes the element (if one exists) with the key equivalent to key. - * References and iterators to the erased elements are invalidated. - * Other references and iterators are not affected. - * This overload only participates in overload resolution if the - * qualified-id Compare::is_transparent is valid and denotes a type and - * std::is_convertible::value != true && - * std::is_convertible::value != true. - * It allows calling this function without constructing an instance of - * Key. - * - * @param[in] key key value of the elements to remove. - * - * @return Number of elements removed. - * - * @throw pmem::transaction_error when snapshotting failed. - * @throw rethrows destructor exception. 
- */ - template < - typename K, - typename = typename std::enable_if< - has_is_transparent::value && - !std::is_convertible::value && - !std::is_convertible::value, - K>::type> - size_type - unsafe_erase(const K &key) - { - std::pair range = equal_range(key); - size_type sz = static_cast( - std::distance(range.first, range.second)); - unsafe_erase(range.first, range.second); - return sz; - } - - /** - * Returns an iterator pointing to the first element that is not less - * than (i.e. greater or equal to) key. - * - * @param[in] key key value to compare the elements to. - * - * @return Iterator pointing to the first element that is not less than - * key. If no such element is found, a past-the-end iterator is - * returned. - */ - iterator - lower_bound(const key_type &key) - { - return internal_get_bound(key, _compare); - } - - /** - * Returns an iterator pointing to the first element that is not less - * than (i.e. greater or equal to) key. - * - * @param[in] key key value to compare the elements to. - * - * @return Iterator pointing to the first element that is not less than - * key. If no such element is found, a past-the-end iterator is - * returned. - */ - const_iterator - lower_bound(const key_type &key) const - { - return internal_get_bound(key, _compare); - } - - /** - * Returns an iterator pointing to the first element that compares not - * less (i.e. greater or equal) to the value x. This overload only - * participates in overload resolution if the qualified-id - * Compare::is_transparent is valid and denotes a type. They allow - * calling this function without constructing an instance of Key. - * - * @param[in] x alternative value that can be compared to Key. - * - * @return Iterator pointing to the first element that is not less than - * key. If no such element is found, a past-the-end iterator is - * returned. 
- */ - template ::value, K>::type> - iterator - lower_bound(const K &x) - { - return internal_get_bound(x, _compare); - } - - /** - * Returns an iterator pointing to the first element that compares not - * less (i.e. greater or equal) to the value x. This overload only - * participates in overload resolution if the qualified-id - * Compare::is_transparent is valid and denotes a type. They allow - * calling this function without constructing an instance of Key. - * - * @param[in] x alternative value that can be compared to Key. - * - * @return Iterator pointing to the first element that is not less than - * key. If no such element is found, a past-the-end iterator is - * returned. - */ - template ::value, K>::type> - const_iterator - lower_bound(const K &x) const - { - return internal_get_bound(x, _compare); - } - - /** - * Returns an iterator pointing to the first element that is not less - * than (i.e. greater or equal to) key. Equivalent of lower_bound. - * - * @param[in] key key value to compare the elements to. - * - * @return Iterator pointing to the first element that is not less than - * key. If no such element is found, a past-the-end iterator is - * returned. - */ - iterator - find_higher_eq(const key_type &key) - { - return internal_get_bound(key, _compare); - } - - /** - * Returns an iterator pointing to the first element that is not less - * than (i.e. greater or equal to) key. Equivalent of lower_bound. - * - * @param[in] key key value to compare the elements to. - * - * @return Iterator pointing to the first element that is not less than - * key. If no such element is found, a past-the-end iterator is - * returned. - */ - const_iterator - find_higher_eq(const key_type &key) const - { - return internal_get_bound(key, _compare); - } - - /** - * Returns an iterator pointing to the first element that compares not - * less (i.e. greater or equal) to the value x. 
This overload only - * participates in overload resolution if the qualified-id - * Compare::is_transparent is valid and denotes a type. They allow - * calling this function without constructing an instance of Key. - * Equivalent of lower_bound. - * - * @param[in] x alternative value that can be compared to Key. - * - * @return Iterator pointing to the first element that is not less than - * key. If no such element is found, a past-the-end iterator is - * returned. - */ - template ::value, K>::type> - iterator - find_higher_eq(const K &x) - { - return internal_get_bound(x, _compare); - } - - /** - * Returns an iterator pointing to the first element that compares not - * less (i.e. greater or equal) to the value x. This overload only - * participates in overload resolution if the qualified-id - * Compare::is_transparent is valid and denotes a type. They allow - * calling this function without constructing an instance of Key. - * Equivalent of lower_bound. - * - * @param[in] x alternative value that can be compared to Key. - * - * @return Iterator pointing to the first element that is not less than - * key. If no such element is found, a past-the-end iterator is - * returned. - */ - template ::value, K>::type> - const_iterator - find_higher_eq(const K &x) const - { - return internal_get_bound(x, _compare); - } - - /** - * Returns an iterator pointing to the first element that is greater - * than key. - * - * @param[in] key key value to compare the elements to. - * - * @return Iterator pointing to the first element that is greater than - * key. If no such element is found, a past-the-end iterator is - * returned. - */ - iterator - upper_bound(const key_type &key) - { - return internal_get_bound(key, not_greater_compare(_compare)); - } - - /** - * Returns an iterator pointing to the first element that is greater - * than key. - * - * @param[in] key key value to compare the elements to. - * - * @return Iterator pointing to the first element that is greater than - * key. 
If no such element is found, a past-the-end iterator is - * returned. - */ - const_iterator - upper_bound(const key_type &key) const - { - return internal_get_bound(key, not_greater_compare(_compare)); - } - - /** - * Returns an iterator pointing to the first element that compares - * greater to the value x. This overload only participates in overload - * resolution if the qualified-id Compare::is_transparent is valid and - * denotes a type. They allow calling this function without constructing - * an instance of Key. - * - * @param[in] x alternative value that can be compared to Key. - * - * @return Iterator pointing to the first element that is greater than - * key. If no such element is found, a past-the-end iterator is - * returned. - */ - template ::value, K>::type> - iterator - upper_bound(const K &x) - { - return internal_get_bound(x, not_greater_compare(_compare)); - } - - /** - * Returns an iterator pointing to the first element that compares - * greater to the value x. This overload only participates in overload - * resolution if the qualified-id Compare::is_transparent is valid and - * denotes a type. They allow calling this function without constructing - * an instance of Key. - * - * @param[in] x alternative value that can be compared to Key. - * - * @return Iterator pointing to the first element that is greater than - * key. If no such element is found, a past-the-end iterator is - * returned. - */ - template ::value, K>::type> - const_iterator - upper_bound(const K &x) const - { - return internal_get_bound(x, not_greater_compare(_compare)); - } - - /** - * Returns an iterator pointing to the first element that is greater - * than key. Equivalent of upper_bound. - * - * @param[in] key key value to compare the elements to. - * - * @return Iterator pointing to the first element that is greater than - * key. If no such element is found, a past-the-end iterator is - * returned. 
- */ - iterator - find_higher(const key_type &key) - { - return internal_get_bound(key, not_greater_compare(_compare)); - } - - /** - * Returns an iterator pointing to the first element that is greater - * than key. Equivalent of upper_bound. - * - * @param[in] key key value to compare the elements to. - * - * @return Iterator pointing to the first element that is greater than - * key. If no such element is found, a past-the-end iterator is - * returned. - */ - const_iterator - find_higher(const key_type &key) const - { - return internal_get_bound(key, not_greater_compare(_compare)); - } - - /** - * Returns an iterator pointing to the first element that compares - * greater to the value x. This overload only participates in overload - * resolution if the qualified-id Compare::is_transparent is valid and - * denotes a type. They allow calling this function without constructing - * an instance of Key. Equivalent of upper_bound. - * - * @param[in] x alternative value that can be compared to Key. - * - * @return Iterator pointing to the first element that is greater than - * key. If no such element is found, a past-the-end iterator is - * returned. - */ - template ::value, K>::type> - iterator - find_higher(const K &x) - { - return internal_get_bound(x, not_greater_compare(_compare)); - } - - /** - * Returns an iterator pointing to the first element that compares - * greater to the value x. This overload only participates in overload - * resolution if the qualified-id Compare::is_transparent is valid and - * denotes a type. They allow calling this function without constructing - * an instance of Key. Equivalent of upper_bound. - * - * @param[in] x alternative value that can be compared to Key. - * - * @return Iterator pointing to the first element that is greater than - * key. If no such element is found, a past-the-end iterator is - * returned. 
- */ - template ::value, K>::type> - const_iterator - find_higher(const K &x) const - { - return internal_get_bound(x, not_greater_compare(_compare)); - } - - /** - * Returns an iterator pointing to the biggest element that is less - * than key. - * - * @param[in] key key value to compare the elements to. - * - * @return Iterator pointing to the biggest element that is less than - * key. If no such element is found, a past-the-end iterator is - * returned. - */ - iterator - find_lower(const key_type &key) - { - auto it = internal_get_biggest_less_than(key, _compare); - return iterator( - const_cast(it.node)); - } - - /** - * Returns a const iterator pointing to the biggest element that is less - * than key. - * - * @param[in] key key value to compare the elements to. - * - * @return Const iterator pointing to the biggest element that is less - * than key. If no such element is found, a past-the-end iterator is - * returned. - */ - const_iterator - find_lower(const key_type &key) const - { - return internal_get_biggest_less_than(key, _compare); - } - - /** - * Returns an iterator pointing to the biggest element that is less - * than key. This overload only participates in overload - * resolution if the qualified-id Compare::is_transparent is valid and - * denotes a type. They allow calling this function without constructing - * an instance of Key. - * - * @param[in] key alternative value that can be compared to Key. - * - * @return Iterator pointing to the biggest element that is less than - * key. If no such element is found, a past-the-end iterator is - * returned. - */ - template ::value, K>::type> - iterator - find_lower(const K &key) - { - auto it = internal_get_biggest_less_than(key, _compare); - return iterator( - const_cast(it.node)); - } - - /** - * Returns a const iterator pointing to the biggest element that is less - * than key. 
This overload only participates in overload - * resolution if the qualified-id Compare::is_transparent is valid and - * denotes a type. They allow calling this function without constructing - * an instance of Key. - * - * @param[in] key alternative value that can be compared to Key. - * - * @return Const iterator pointing to the biggest element that is less - * than key. If no such element is found, a past-the-end iterator is - * returned. - */ - template ::value, K>::type> - const_iterator - find_lower(const K &key) const - { - return internal_get_biggest_less_than(key, _compare); - } - - /** - * Returns an iterator pointing to the biggest element that is less - * than or equal to key. - * - * @param[in] key key value to compare the elements to. - * - * @return Iterator pointing to the biggest element that is less than - * or equal to key. If no such element is found, a past-the-end iterator - * is returned. - */ - iterator - find_lower_eq(const key_type &key) - { - auto it = internal_get_biggest_less_than( - key, not_greater_compare(_compare)); - return iterator( - const_cast(it.node)); - } - - /** - * Returns a const iterator pointing to the biggest element that is less - * than or equal to key. - * - * @param[in] key key value to compare the elements to. - * - * @return Const iterator pointing to the biggest element that is less - * than or equal to key. If no such element is found, a past-the-end - * iterator is returned. - */ - const_iterator - find_lower_eq(const key_type &key) const - { - return internal_get_biggest_less_than( - key, not_greater_compare(_compare)); - } - - /** - * Returns an iterator pointing to the biggest element that is less - * than or equal to key. This overload only participates in overload - * resolution if the qualified-id Compare::is_transparent is valid and - * denotes a type. They allow calling this function without constructing - * an instance of Key. - * - * @param[in] key alternative value that can be compared to Key. 
- * - * @return Iterator pointing to the biggest element that is less than or - * equal to key. If no such element is found, a past-the-end iterator is - * returned. - */ - template ::value, K>::type> - iterator - find_lower_eq(const K &key) - { - auto it = internal_get_biggest_less_than( - key, not_greater_compare(_compare)); - return iterator( - const_cast(it.node)); - } - - /** - * Returns a const iterator pointing to the biggest element that is less - * than or equal to key. This overload only participates in overload - * resolution if the qualified-id Compare::is_transparent is valid and - * denotes a type. They allow calling this function without constructing - * an instance of Key. - * - * @param[in] key alternative value that can be compared to Key. - * - * @return Const iterator pointing to the biggest element that is less - * than or equal to key. If no such element is found, a past-the-end - * iterator is returned. - */ - template ::value, K>::type> - const_iterator - find_lower_eq(const K &key) const - { - return internal_get_biggest_less_than( - key, not_greater_compare(_compare)); - } - - /** - * Finds an element with key equivalent to key. - * - * @param[in] key key value of the element to search for. - * - * @return Iterator to an element with key equivalent to key. If no such - * element is found, past-the-end iterator is returned. - */ - iterator - find(const key_type &key) - { - return internal_find(key); - } - - /** - * Finds an element with key equivalent to key. - * - * @param[in] key key value of the element to search for. - * - * @return Iterator to an element with key equivalent to key. If no such - * element is found, past-the-end iterator is returned. - */ - const_iterator - find(const key_type &key) const - { - return internal_find(key); - } - - /** - * Finds an element with key that compares equivalent to the value x. 
- * This overload only participates in overload resolution if the - * qualified-id Compare::is_transparent is valid and denotes a type. It - * allows calling this function without constructing an instance of Key. - * - * @param[in] x a value of any type that can be transparently compared - * with a key. - * - * @return Iterator to an element with key equivalent to key. If no such - * element is found, past-the-end iterator is returned. - */ - template ::value, K>::type> - iterator - find(const K &x) - { - return internal_find(x); - } - - /** - * Finds an element with key that compares equivalent to the value x. - * This overload only participates in overload resolution if the - * qualified-id Compare::is_transparent is valid and denotes a type. It - * allows calling this function without constructing an instance of Key. - * - * @param[in] x a value of any type that can be transparently compared - * with a key. - * - * @return Iterator to an element with key equivalent to key. If no such - * element is found, past-the-end iterator is returned. - */ - template ::value, K>::type> - const_iterator - find(const K &x) const - { - return internal_find(x); - } - - /** - * Returns the number of elements with key that compares equivalent to - * the specified argument. - * - * @param[in] key key value of the element to count. - * - * @return Number of elements with key that compares equivalent to the - * specified argument. - */ - size_type - count(const key_type &key) const - { - return internal_count(key); - } - - /** - * Returns the number of elements with key that compares equivalent to - * the specified argument. This overload only participates in overload - * resolution if the qualified-id Compare::is_transparent is valid and - * denotes a type. They allow calling this function without constructing - * an instance of Key. - * - * @param[in] key alternative value to compare to the keys. 
- * - * @return Number of elements with key that compares equivalent to the - * specified argument. - */ - template ::value, K>::type> - size_type - count(const K &key) const - { - return internal_count(key); - } - - /** - * Checks if there is an element with key equivalent to key in the - * container. - * - * @param[in] key key value of the element to search for. - * - * @return true if there is such an element, otherwise false. - */ - bool - contains(const key_type &key) const - { - return find(key) != end(); - } - - /** - * Checks if there is an element with key that compares equivalent to - * the value x. This overload only participates in overload resolution - * if the qualified-id Compare::is_transparent is valid and denotes a - * type. It allows calling this function without constructing an - * instance of Key. - * - * @param[in] x a value of any type that can be transparently compared - * with a key. - * - * @return true if there is such an element, otherwise false. - */ - template ::value, K>::type> - bool - contains(const K &x) const - { - return find(x) != end(); - } - - /** - * Erases all elements from the container transactionally. - * - * @post size() == 0 - * - * @throw pmem::transaction_error when snapshotting failed. - * @throw rethrows destructor exception. - */ - void - clear() - { - assert(dummy_head->height() > 0); - obj::pool_base pop = get_pool_base(); - - persistent_node_ptr current = dummy_head->next(0); - - obj::flat_transaction::run(pop, [&] { - while (current) { - assert(current->height() > 0); - persistent_node_ptr next = current->next(0); - delete_node(current); - current = next; - } - - node_ptr head = dummy_head.get(); - for (size_type i = 0; i < head->height(); ++i) { - head->set_next_tx(i, nullptr); - } - - on_init_size = 0; - tls_data.clear(); - obj::flat_transaction::snapshot((size_t *)&_size); - _size = 0; - }); - } - - /** - * Returns an iterator to the first element of the container. 
- * If the map is empty, the returned iterator will be equal to end(). - * - * @return Iterator to the first element. - */ - iterator - begin() - { - return iterator(dummy_head.get()->next(0).get()); - } - - /** - * Returns an iterator to the first element of the container. - * If the map is empty, the returned iterator will be equal to end(). - * - * @return Iterator to the first element. - */ - const_iterator - begin() const - { - return const_iterator(dummy_head.get()->next(0).get()); - } - - /** - * Returns an iterator to the first element of the container. - * If the map is empty, the returned iterator will be equal to end(). - * - * @return Iterator to the first element. - */ - const_iterator - cbegin() const - { - return const_iterator(dummy_head.get()->next(0).get()); - } - - /** - * Returns an iterator to the element following the last element of the - * map. This element acts as a placeholder; attempting to access it - * results in undefined behavior. - * - * @return Iterator to the element following the last element. - */ - iterator - end() - { - return iterator(nullptr); - } - - /** - * Returns an iterator to the element following the last element of the - * map. This element acts as a placeholder; attempting to access it - * results in undefined behavior. - * - * @return Iterator to the element following the last element. - */ - const_iterator - end() const - { - return const_iterator(nullptr); - } - - /** - * Returns an iterator to the element following the last element of the - * map. This element acts as a placeholder; attempting to access it - * results in undefined behavior. - * - * @return Iterator to the element following the last element. - */ - const_iterator - cend() const - { - return const_iterator(nullptr); - } - - /** - * Returns the number of elements in the container, i.e. - * std::distance(begin(), end()). - * - * @return The number of elements in the container. 
- */ - size_type - size() const - { - return _size.load(std::memory_order_relaxed); - } - - /** - * Returns the maximum number of elements the container is able to hold - * due to system or library implementation limitations, i.e. - * std::distance(begin(), end()) for the largest container. - * - * @return Maximum number of elements. - */ - size_type - max_size() const - { - return std::numeric_limits::max(); - } - - /** - * Checks if the container has no elements, i.e. whether begin() == - * end(). - * - * @return true if the container is empty, false otherwise. - */ - bool - empty() const - { - return 0 == size(); - } - - /** - * XXX: Implement get_allocator() interface. - * Related with: https://github.com/pmem/libpmemobj-cpp/issues/827 - */ - - /** - * Exchanges the contents of the container with those of other - * transactionally. Does not invoke any move, copy, or swap operations - * on individual elements. - * - * @throw pmem::transaction_error when snapshotting failed. - */ - void - swap(swmr_skip_list &other) - { - obj::pool_base pop = get_pool_base(); - obj::flat_transaction::run(pop, [&] { - using pocs_t = typename node_allocator_traits:: - propagate_on_container_swap; - allocator_swap(_node_allocator, other._node_allocator, - pocs_t()); - std::swap(_compare, other._compare); - std::swap(_rnd_generator, other._rnd_generator); - std::swap(dummy_head, other.dummy_head); - on_init_size.swap(other.on_init_size); - - obj::flat_transaction::snapshot((size_t *)&_size); - obj::flat_transaction::snapshot( - (size_t *)&(other._size)); - _size = other._size.exchange(_size, - std::memory_order_relaxed); - }); - } - - /** - * Returns a range containing all elements with the given key in the - * container. The range is defined by two iterators, one pointing to the - * first element that is not less than key and another pointing to the - * first element greater than key. 
Alternatively, the first iterator may - * be obtained with lower_bound(), and the second with upper_bound(). - * - * Compares the keys to key. - * - * @param[in] key key value to compare the elements to. - * - * @return std::pair containing a pair of iterators defining the wanted - * range: the first pointing to the first element that is not less than - * key and the second pointing to the first element greater than key. If - * there are no elements not less than key, past-the-end (see end()) - * iterator is returned as the first element. Similarly if there are no - * elements greater than key, past-the-end iterator is returned as the - * second element. - */ - std::pair - equal_range(const key_type &key) - { - return std::pair(lower_bound(key), - upper_bound(key)); - } - - /** - * Returns a range containing all elements with the given key in the - * container. The range is defined by two iterators, one pointing to the - * first element that is not less than key and another pointing to the - * first element greater than key. Alternatively, the first iterator may - * be obtained with lower_bound(), and the second with upper_bound(). - * - * Compares the keys to key. - * - * @param[in] key key value to compare the elements to. - * - * @return std::pair containing a pair of iterators defining the wanted - * range: the first pointing to the first element that is not less than - * key and the second pointing to the first element greater than key. If - * there are no elements not less than key, past-the-end (see end()) - * iterator is returned as the first element. Similarly if there are no - * elements greater than key, past-the-end iterator is returned as the - * second element. - */ - std::pair - equal_range(const key_type &key) const - { - return std::pair( - lower_bound(key), upper_bound(key)); - } - - /** - * Returns a range containing all elements with the given key in the - * container. 
The range is defined by two iterators, one pointing to the - * first element that is not less than key and another pointing to the - * first element greater than key. Alternatively, the first iterator may - * be obtained with lower_bound(), and the second with upper_bound(). - * - * Compares the keys to the value x. This overload only participates in - * overload resolution if the qualified-id Compare::is_transparent is - * valid and denotes a type. They allow calling this function without - * constructing an instance of Key. - * - * @param[in] x alternative value that can be compared to Key. - * - * @return std::pair containing a pair of iterators defining the wanted - * range: the first pointing to the first element that is not less than - * key and the second pointing to the first element greater than key. If - * there are no elements not less than key, past-the-end (see end()) - * iterator is returned as the first element. Similarly if there are no - * elements greater than key, past-the-end iterator is returned as the - * second element. - */ - template ::value, K>::type> - std::pair - equal_range(const K &x) - { - return std::pair(lower_bound(x), - upper_bound(x)); - } - - /** - * Returns a range containing all elements with the given key in the - * container. The range is defined by two iterators, one pointing to the - * first element that is not less than key and another pointing to the - * first element greater than key. Alternatively, the first iterator may - * be obtained with lower_bound(), and the second with upper_bound(). - * - * Compares the keys to the value x. This overload only participates in - * overload resolution if the qualified-id Compare::is_transparent is - * valid and denotes a type. They allow calling this function without - * constructing an instance of Key. - * - * @param[in] key alternative value that can be compared to Key. 
- * - * @return std::pair containing a pair of iterators defining the wanted - * range: the first pointing to the first element that is not less than - * key and the second pointing to the first element greater than key. If - * there are no elements not less than key, past-the-end (see end()) - * iterator is returned as the first element. Similarly if there are no - * elements greater than key, past-the-end iterator is returned as the - * second element. - */ - template ::value, K>::type> - std::pair - equal_range(const K &key) const - { - return std::pair( - lower_bound(key), upper_bound(key)); - } - - /** - * Returns a const reference to the object that compares the keys. - * - * @return Const reference to the key comparison function object. - */ - const key_compare & - key_comp() const - { - return _compare; - } - - /** - * Returns a reference to the object that compares the keys. - * - * @return Reference to the key comparison function object. - */ - key_compare & - key_comp() - { - return _compare; - } - -private: - /* Status flags stored in insert_stage field */ - enum insert_stage_type : uint8_t { not_started = 0, in_progress = 1 }; - /* - * Structure of thread local data. - * Size should be 64 bytes. - */ - struct tls_entry_type { - persistent_node_ptr ptr; - obj::p size_diff; - obj::p insert_stage; - - char reserved[64 - sizeof(decltype(ptr)) - - sizeof(decltype(size_diff)) - - sizeof(decltype(insert_stage))]; - }; - static_assert(sizeof(tls_entry_type) == 64, - "The size of tls_entry_type should be 64 bytes."); - - /** - * Private helper function. Checks if current transaction stage is equal - * to TX_STAGE_WORK and throws an exception otherwise. - * - * @throw pmem::transaction_scope_error if current transaction stage is - * not equal to TX_STAGE_WORK. 
- */ - void - check_tx_stage_work() const - { - if (pmemobj_tx_stage() != TX_STAGE_WORK) - throw pmem::transaction_scope_error( - "Function called out of transaction scope."); - } - - /* Helper method which throws an exception when called in a tx */ - static inline void - check_outside_tx() - { - if (pmemobj_tx_stage() != TX_STAGE_NONE) - throw pmem::transaction_scope_error( - "Function called inside transaction scope."); - } - - void - init() - { - if (pool_uuid == 0) - throw pmem::pool_error("Invalid pool handle."); - - _size = 0; - on_init_size = 0; - create_dummy_head(); - } - - void - internal_move(swmr_skip_list &&other) - { - assert(this->empty()); - assert(pmemobj_tx_stage() == TX_STAGE_WORK); - dummy_head = other.dummy_head; - other.dummy_head = nullptr; - other.create_dummy_head(); - - _size.store(other._size.load(std::memory_order_relaxed), - std::memory_order_relaxed); - on_init_size = other.on_init_size; - } - - static const_reference - get_val(const_node_ptr n) - { - assert(n); - return *(n->get()); - } - - static reference - get_val(node_ptr n) - { - assert(n); - return *(n->get()); - } - - static const key_type & - get_key(const_node_ptr n) - { - assert(n); - return traits_type::get_key(get_val(n)); - } - - template - iterator - internal_find(const K &key) - { - iterator it = lower_bound(key); - return (it == end() || _compare(key, traits_type::get_key(*it))) - ? end() - : it; - } - - template - const_iterator - internal_find(const K &key) const - { - const_iterator it = lower_bound(key); - return (it == end() || _compare(key, traits_type::get_key(*it))) - ? end() - : it; - } - - template - size_type - internal_count(const K &key) const - { - if (allow_multimapping) { - std::pair range = - equal_range(key); - return static_cast( - std::distance(range.first, range.second)); - } - return (find(key) == end()) ? 
size_type(0) : size_type(1); - } - - /** - * Finds position on the @arg level using @arg cmp - * @param level - on which level search prev node - * @param prev - pointer to the start node to search - * @param key - key to search - * @param cmp - callable object to compare two objects - * (_compare member is default comparator) - * @returns pointer to the node which is not satisfy the comparison with - * @arg key - */ - template - persistent_node_ptr - internal_find_position(size_type level, pointer_type &prev, - const K &key, const comparator &cmp) const - { - assert(level < prev->height()); - persistent_node_ptr next = prev->next(level); - pointer_type curr = next.get(); - - while (curr && cmp(get_key(curr), key)) { - prev = curr; - assert(level < prev->height()); - next = prev->next(level); - curr = next.get(); - } - - return next; - } - - /** - * The method finds insert position for the given @arg key. It finds - * successor and predecessor nodes on each level of the skip list. - * - * @param[out] prev_nodes array of pointers to predecessor nodes on each - * level. - * @param[out] next_nodes array of pointers to successor nodes on each - * level. - * @param[in] key inserted key. - */ - template - void - find_insert_pos(prev_array_type &prev_nodes, - next_array_type &next_nodes, const K &key) - { - if (allow_multimapping) { - fill_prev_next_arrays(prev_nodes, next_nodes, key, - not_greater_compare(_compare)); - } else { - fill_prev_next_arrays(prev_nodes, next_nodes, key, - _compare); - } - } - - /** - * The method finds successor and predecessor nodes on each level of the - * skip list for the given @arg key. - * - * @param[out] prev_nodes array of pointers to predecessor nodes on each - * level. - * @param[out] next_nodes array of pointers to successor nodes on each - * level. - * @param[in] key inserted key. - * @param[in] cmp comparator functor used for the search. 
- */ - template - void - fill_prev_next_arrays(prev_array_type &prev_nodes, - next_array_type &next_nodes, const K &key, - const comparator &cmp) - { - node_ptr prev = dummy_head.get(); - prev_nodes.fill(prev); - next_nodes.fill(nullptr); - - for (size_type h = prev->height(); h > 0; --h) { - persistent_node_ptr next = - internal_find_position(h - 1, prev, key, cmp); - prev_nodes[h - 1] = prev; - next_nodes[h - 1] = next; - } - } - - template - std::pair - internal_try_emplace(K &&key, Args &&... args) - { - return internal_insert( - key, std::piecewise_construct, - std::forward_as_tuple(std::forward(key)), - std::forward_as_tuple(std::forward(args)...)); - } - - template - std::pair - internal_emplace(Args &&... args) - { - check_outside_tx(); - tls_entry_type &tls_entry = tls_data.local(); - obj::pool_base pop = get_pool_base(); - - obj::flat_transaction::run(pop, [&] { - assert(tls_entry.ptr == nullptr); - tls_entry.ptr = - create_node(std::forward(args)...); - ++tls_entry.size_diff; - tls_entry.insert_stage = not_started; - }); - - node_ptr n = tls_entry.ptr.get(); - size_type height = n->height(); - - std::pair insert_result = internal_insert_node( - get_key(n), height, - [&](const next_array_type &next_nodes) - -> persistent_node_ptr & { - assert(tls_entry.insert_stage == not_started); - assert(tls_entry.ptr != nullptr); - n->set_nexts(pop, next_nodes.data(), height); - - tls_entry.insert_stage = in_progress; - pop.persist(&(tls_entry.insert_stage), - sizeof(tls_entry.insert_stage)); - - return tls_entry.ptr; - }); - - if (!insert_result.second) { - assert(tls_entry.ptr != nullptr); - assert(tls_entry.insert_stage == not_started); - - obj::flat_transaction::run(pop, [&] { - --tls_entry.size_diff; - delete_node(tls_entry.ptr); - tls_entry.ptr = nullptr; - }); - } - - assert(tls_entry.ptr == nullptr); - return insert_result; - } - - /** - * Not thread-safe but can be called within a transaction. - * XXX: Need to optimize for single-threaded case. 
- */ - template - std::pair - internal_unsafe_emplace(Args &&... args) - { - check_tx_stage_work(); - - persistent_node_ptr new_node = - create_node(std::forward(args)...); - - node_ptr n = new_node.get(); - size_type height = n->height(); - - std::pair insert_result = internal_insert_node( - get_key(n), height, - [&](const next_array_type &next_nodes) - -> persistent_node_ptr & { - assert(new_node != nullptr); - - n->set_nexts(next_nodes.data(), height); - - return new_node; - }); - - if (insert_result.second) { - ++on_init_size; - } else { - assert(new_node != nullptr); - - delete_node(new_node); - } - - return insert_result; - } - - /** - * Construct and insert new node to the skip list in a thread-safe way. - */ - template - std::pair - internal_insert(const K &key, Args &&... args) - { - check_outside_tx(); - tls_entry_type &tls_entry = tls_data.local(); - assert(tls_entry.ptr == nullptr); - - size_type height = random_level(); - - std::pair insert_result = internal_insert_node( - key, height, - [&](const next_array_type &next_nodes) - -> persistent_node_ptr & { - obj::pool_base pop = get_pool_base(); - - obj::flat_transaction::manual tx(pop); - tls_entry.ptr = create_node( - std::forward_as_tuple( - height, next_nodes.data()), - std::forward_as_tuple( - std::forward(args)...)); - - ++(tls_entry.size_diff); - tls_entry.insert_stage = in_progress; - obj::flat_transaction::commit(); - - assert(tls_entry.ptr != nullptr); - return tls_entry.ptr; - }); - - assert(tls_entry.ptr == nullptr); - - return insert_result; - } - - /** - * Try to insert new node to the skip list in a thread-safe way. 
- */ - template - std::pair - internal_insert_node(const K &key, size_type height, - PrepareNode &&prepare_new_node) - { - prev_array_type prev_nodes; - next_array_type next_nodes; - node_ptr n = nullptr; - - do { - find_insert_pos(prev_nodes, next_nodes, key); - - node_ptr next = next_nodes[0].get(); - if (next && !allow_multimapping && - !_compare(key, get_key(next))) { - - return std::pair(iterator(next), - false); - } - - } while ((n = try_insert_node(prev_nodes, next_nodes, height, - std::forward( - prepare_new_node))) == - nullptr); - - assert(n); - return std::pair(iterator(n), true); - } - - /** - * Try to insert new node to the skip list. - * @returns pointer to the new node if it was inserted. Otherwise, - * returns nullptr. - */ - template - node_ptr - try_insert_node(prev_array_type &prev_nodes, - const next_array_type &next_nodes, size_type height, - PrepareNode &&prepare_new_node) - { - assert(dummy_head->height() >= height); - - lock_array locks; - if (!try_lock_nodes(height, prev_nodes, next_nodes, locks)) { - return nullptr; - } - - node_lock_type new_node_lock; - - persistent_node_ptr &new_node = prepare_new_node(next_nodes); - assert(new_node != nullptr); - node_ptr n = new_node.get(); - - /* - * We need to hold lock to the new node until changes - * are committed to persistent domain. Otherwise, the - * new node would be visible to concurrent inserts - * before it is persisted. - */ - new_node_lock = n->acquire(); - - obj::pool_base pop = get_pool_base(); - /* - * In the loop below we are linking a new node to all layers of - * the skip list. Transaction is not required because in case of - * failure the node is reachable via a pointer from persistent - * TLS. During recovery, we will complete the insert. It is also - * OK if concurrent readers will see not a fully-linked node - * because during recovery the insert procedure will be - * completed. 
- */ - for (size_type level = 0; level < height; ++level) { - assert(prev_nodes[level]->height() > level); - assert(prev_nodes[level]->next(level) == - next_nodes[level]); - assert(prev_nodes[level]->next(level) == - n->next(level)); - prev_nodes[level]->set_next(level, new_node); - } - -#ifndef NDEBUG - try_insert_node_finish_marker(); -#endif - - new_node = nullptr; - /* We need to persist the node pointer. Otherwise, on a restart, - * this pointer might be not null but the node can be already - * deleted. */ - pop.persist(&new_node, sizeof(new_node)); - - ++_size; -#if LIBPMEMOBJ_CPP_VG_PMEMCHECK_ENABLED - VALGRIND_PMC_DO_FLUSH(&_size, sizeof(_size)); -#endif - - assert(n); - return n; - } - - /** - * Used only inside asserts. - * Checks that prev_array is filled with correct values. - */ - bool - check_prev_array(const prev_array_type &prevs, size_type height) - { - for (size_type l = 1; l < height; ++l) { - if (prevs[l] == dummy_head.get()) { - continue; - } - - assert(prevs[l - 1] != dummy_head.get()); - assert(!_compare(get_key(prevs[l - 1]), - get_key(prevs[l]))); - } - - return true; - } - - bool - try_lock_nodes(size_type height, prev_array_type &prevs, - const next_array_type &nexts, lock_array &locks) - { - assert(check_prev_array(prevs, height)); - - for (size_type l = 0; l < height; ++l) { - if (l == 0 || prevs[l] != prevs[l - 1]) { - locks[l] = prevs[l]->acquire(); - } - - persistent_node_ptr next = prevs[l]->next(l); - if (next != nexts[l]) - /* Other thread inserted to this position and - * modified the pointer before we acquired the - * lock */ - return false; - } - - return true; - } - - /** - * Returns an iterator pointing to the first element from the list for - * which cmp(element, key) is false. - * - * @param[in] key key value to compare the elements to. - * @param[in] cmp comparator functor used for the search. - * - * @return Iterator pointing to the first element for which - * cmp(element, key) is false. 
If no such element is found, a - * past-the-end iterator is returned. - */ - template - const_iterator - internal_get_bound(const K &key, const comparator &cmp) const - { - const_node_ptr prev = dummy_head.get(); - assert(prev->height() > 0); - persistent_node_ptr next = nullptr; - - for (size_type h = prev->height(); h > 0; --h) { - next = internal_find_position(h - 1, prev, key, cmp); - } - - return const_iterator(next.get()); - } - - /** - * Returns an iterator pointing to the first element from the list for - * which cmp(element, key) is false. - * - * @param[in] key key value to compare the elements to. - * @param[in] cmp comparator functor used for the search. - * - * @return Iterator pointing to the first element for which - * cmp(element, key) is false. If no such element is found, a - * past-the-end iterator is returned. - */ - template - iterator - internal_get_bound(const K &key, const comparator &cmp) - { - node_ptr prev = dummy_head.get(); - assert(prev->height() > 0); - persistent_node_ptr next = nullptr; - - for (size_type h = prev->height(); h > 0; --h) { - next = internal_find_position(h - 1, prev, key, cmp); - } - - return iterator(next.get()); - } - - /** - * Returns an iterator pointing to the last element from the list for - * which cmp(element, key) is true. - * - * @param[in] key key value to compare the elements to. - * @param[in] cmp comparator functor used for the search. - * - * @return Iterator pointing to the first element for which - * cmp(element, key) is false. If no such element is found, a - * past-the-end iterator is returned. 
- */ - template - const_iterator - internal_get_biggest_less_than(const K &key, - const comparator &cmp) const - { - const_node_ptr prev = dummy_head.get(); - assert(prev->height() > 0); - - for (size_type h = prev->height(); h > 0; --h) { - internal_find_position(h - 1, prev, key, cmp); - } - - if (prev == dummy_head.get()) - return end(); - - return const_iterator(prev); - } - - iterator - internal_erase(const_iterator pos, obj::p &size_diff) - { - assert(pos != end()); - - obj::pool_base pop = get_pool_base(); - - std::pair - extract_result(nullptr, nullptr); - - obj::flat_transaction::run(pop, [&] { - extract_result = internal_extract(pos); - - /* Make sure that node was extracted */ - assert(extract_result.first != nullptr); - delete_node(extract_result.first); - --size_diff; - obj::flat_transaction::snapshot((size_type *)&_size); - --_size; - }); - - return iterator(extract_result.second.get()); - } - - /** - * @returns a pointer to extracted node and a pointer to next node - */ - std::pair - internal_extract(const_iterator it) - { - assert(dummy_head->height() > 0); - assert(it != end()); - assert(pmemobj_tx_stage() == TX_STAGE_WORK); - - const key_type &key = traits_type::get_key(*it); - - prev_array_type prev_nodes; - next_array_type next_nodes; - - fill_prev_next_arrays(prev_nodes, next_nodes, key, _compare); - - node_ptr erase_node = next_nodes[0].get(); - assert(erase_node != nullptr); - - if (!_compare(key, get_key(erase_node))) { - /* XXX: this assertion will fail in case of multimap - * because we take the first node with the same key. - * Need to extend algorithm for mutimap. 
*/ - assert(erase_node == it.node); - return internal_extract_node(prev_nodes, next_nodes, - erase_node); - } - - return std::pair( - nullptr, nullptr); - } - - std::pair - internal_extract_node(const prev_array_type &prev_nodes, - const next_array_type &next_nodes, - node_ptr erase_node) - { - assert(pmemobj_tx_stage() == TX_STAGE_WORK); - assert(erase_node != nullptr); - for (size_type level = 0; level < erase_node->height(); - ++level) { - assert(prev_nodes[level]->height() > level); - assert(next_nodes[level].get() == erase_node); - prev_nodes[level]->set_next_tx(level, - erase_node->next(level)); - } - - return std::pair( - next_nodes[0], erase_node->next(0)); - } - - /** - * Get the persistent memory pool where hashmap resides. - * @returns pmem::obj::pool_base object. - */ - obj::pool_base - get_pool_base() const - { - PMEMobjpool *pop = pmemobj_pool_by_ptr(this); - return obj::pool_base(pop); - } - - void - internal_copy(const swmr_skip_list &other) - { - internal_copy(other.begin(), other.end()); - } - - template - void - internal_copy(Iterator first, Iterator last) - { - assert(pmemobj_tx_stage() == TX_STAGE_WORK); - - prev_array_type prev_nodes; - prev_nodes.fill(dummy_head.get()); - size_type sz = 0; - - for (; first != last; ++first, ++sz) { - persistent_node_ptr new_node = create_node(*first); - node_ptr n = new_node.get(); - for (size_type level = 0; level < n->height(); - ++level) { - prev_nodes[level]->set_next_tx(level, new_node); - prev_nodes[level] = n; - } - } - - on_init_size = sz; - /* - * As internal_swap can only be called from one thread, and - * there can be an outer transaction we must make sure that size - * change is transactional - */ - obj::flat_transaction::snapshot((size_type *)&_size); - _size = sz; - assert(std::is_sorted( - this->begin(), this->end(), - [&](const value_type &lhs, const value_type &rhs) { - return lhs.first < rhs.first; - })); - } - - /** Generate random level */ - size_type - random_level() - { - return 
_rnd_generator(); - } - - static size_type - calc_node_size(size_type height) - { - return sizeof(list_node_type) + - height * sizeof(typename list_node_type::node_pointer); - } - - /** Creates new node */ - template - persistent_node_ptr - create_node(Args &&... args) - { - size_type levels = random_level(); - - return create_node( - std::forward_as_tuple(levels), - std::forward_as_tuple(std::forward(args)...)); - } - - template - persistent_node_ptr - create_node(std::tuple &&node_args, - std::tuple &&value_args) - { - assert(pmemobj_tx_stage() == TX_STAGE_WORK); - - persistent_node_ptr node = creates_dummy_node( - std::forward>(node_args), - index_sequence_for{}); - - construct_value_type( - node, - std::forward>(value_args), - index_sequence_for{}); - - return node; - } - - template - void - construct_value_type(persistent_node_ptr node, Tuple &&args, - index_sequence) - { - node_ptr new_node = node.get(); - - node_allocator_traits::construct( - _node_allocator, new_node->get(), - std::get(std::forward(args))...); - } - - /** - * Creates dummy head. - * - * @pre Always called from ctor. - */ - void - create_dummy_head() - { - dummy_head = creates_dummy_node(MAX_LEVEL); - } - - template - persistent_node_ptr - creates_dummy_node(Tuple &&args, index_sequence) - { - return creates_dummy_node( - std::get(std::forward(args))...); - } - - /** - * Creates new node, value_type should be constructed separately. - * Each node object has different size which depends on number of layers - * the node is linked. In this method we calculate the size of the new - * node based on the node height. Then required amount of bytes are - * allocated and casted to the persistent_node_ptr. - * - * @pre Should be called inside transaction. - */ - template - persistent_node_ptr - creates_dummy_node(size_type height, Args &&... 
args) - { - assert(pmemobj_tx_stage() == TX_STAGE_WORK); - size_type sz = calc_node_size(height); - - persistent_node_ptr n = - node_allocator_traits::allocate(_node_allocator, sz) - .raw(); - - assert(n != nullptr); - - node_allocator_traits::construct(_node_allocator, n.get(), - height, - std::forward(args)...); - - return n; - } - - template - void - delete_node(persistent_node_ptr &node) - { - assert(pmemobj_tx_stage() == TX_STAGE_WORK); - node_ptr n = node.get(); - size_type sz = calc_node_size(n->height()); - - /* Destroy value */ - if (!is_dummy) - node_allocator_traits::destroy(_node_allocator, - n->get()); - /* Destroy node */ - node_allocator_traits::destroy(_node_allocator, n); - /* Deallocate memory */ - deallocate_node(node, sz); - node = nullptr; - } - - void - deallocate_node(persistent_node_ptr &node, size_type sz) - { - /* - * Each node object has different size which depends on number - * of layers the node is linked. Therefore, allocate/deallocate - * just a raw byte array. persistent_ptr is used as a - * pointer to raw array of bytes. - */ - obj::persistent_ptr tmp = - node.to_persistent_ptr().raw(); - node_allocator_traits::deallocate(_node_allocator, tmp, sz); - } - - void - delete_dummy_head() - { - assert(dummy_head != nullptr); - delete_node(dummy_head); - assert(dummy_head == nullptr); - } - - iterator - get_iterator(const_iterator it) - { - return iterator( - const_cast(it.node)); - } - - /** Process any information which was saved to tls and clears tls */ - void - tls_restore() - { - int64_t last_run_size = 0; - obj::pool_base pop = get_pool_base(); - - for (auto &tls_entry : tls_data) { - persistent_node_ptr &node = tls_entry.ptr; - auto &size_diff = tls_entry.size_diff; - if (node) { - /* - * We are completing inserts which were in - * progress before the crash because readers - * might saw incompleted inserts before the - * crash. 
We set the in_progress flag inside - * try_insert_node function when we locked the - * predecessors for the new node, therefore, - * only single node with the same key might have - * the in_progress status. - */ - if (tls_entry.insert_stage == in_progress) { - complete_insert(tls_entry); - } else { - obj::flat_transaction::run(pop, [&] { - --(tls_entry.size_diff); - delete_node(node); - node = nullptr; - }); - } - } - - assert(node == nullptr); - - last_run_size += size_diff; - } - - /* Make sure that on_init_size + last_run_size >= 0 */ - assert(last_run_size >= 0 || - on_init_size > - static_cast(std::abs(last_run_size))); - obj::flat_transaction::run(pop, [&] { - tls_data.clear(); - on_init_size += static_cast(last_run_size); - }); - _size = on_init_size; -#if LIBPMEMOBJ_CPP_VG_PMEMCHECK_ENABLED - VALGRIND_PMC_DO_FLUSH(&_size, sizeof(_size)); -#endif - } - - void - complete_insert(tls_entry_type &tls_entry) - { - persistent_node_ptr &node = tls_entry.ptr; - assert(node != nullptr); - assert(tls_entry.insert_stage == in_progress); - prev_array_type prev_nodes; - next_array_type next_nodes; - node_ptr n = node.get(); - const key_type &key = get_key(n); - size_type height = n->height(); - - fill_prev_next_arrays(prev_nodes, next_nodes, key, _compare); - obj::pool_base pop = get_pool_base(); - - /* Node was partially linked */ - for (size_type level = 0; level < height; ++level) { - assert(prev_nodes[level]->height() > level); - assert(prev_nodes[level]->next(level) == - next_nodes[level]); - - if (prev_nodes[level]->next(level) != node) { - /* Otherwise, node already linked on - * this layer */ - assert(n->next(level) == next_nodes[level]); - prev_nodes[level]->set_next(level, node); - } - } - - node = nullptr; - pop.persist(&node, sizeof(node)); - } - - struct not_greater_compare { - const key_compare &my_less_compare; - - not_greater_compare(const key_compare &less_compare) - : my_less_compare(less_compare) - { - } - - template - bool - operator()(const K1 
&first, const K2 &second) const - { - return !my_less_compare(second, first); - } - }; - - const uint64_t pool_uuid = pmemobj_oid(this).pool_uuid_lo; - node_allocator_type _node_allocator; - key_compare _compare; - random_level_generator_type _rnd_generator; - persistent_node_ptr dummy_head; - - enumerable_thread_specific tls_data; - - std::atomic _size; - - /** - * This variable holds real size after the skip list is initialized. - * It holds real value of size only after initialization (before any - * insert/remove). - */ - obj::p on_init_size; -}; /* class swmr_skip_list */ - -} /* namespace detail */ -} /* namespace pmem */ - -#endif /* PMEMOBJ_SWMR_SKIP_LIST_IMPL_HPP */ diff --git a/tests/ptr/pa_self_relative_ptr.cpp b/tests/ptr/pa_self_relative_ptr.cpp index 1efe91bbf0..eb917de410 100644 --- a/tests/ptr/pa_self_relative_ptr.cpp +++ b/tests/ptr/pa_self_relative_ptr.cpp @@ -9,7 +9,7 @@ #include "ptr.hpp" #include "unittest.hpp" -#include +#include #include #include #include @@ -22,7 +22,8 @@ #define LAYOUT "cpp" template -using pa_self_relative_ptr = nvobj::experimental::pa_self_relative_ptr; +using pa_self_relative_ptr = + nvobj::experimental::self_relative_ptr; using self_relative_ptr_base = nvobj::experimental::self_relative_ptr_base; namespace diff --git a/tests/ptr/pa_self_relative_ptr_arith.cpp b/tests/ptr/pa_self_relative_ptr_arith.cpp index b424d5f0ae..69a6ac9f76 100644 --- a/tests/ptr/pa_self_relative_ptr_arith.cpp +++ b/tests/ptr/pa_self_relative_ptr_arith.cpp @@ -3,9 +3,13 @@ #include "ptr_arith.hpp" -#include +#include #include +template +using pa_self_relative_ptr = + nvobj::experimental::self_relative_ptr; + #define LAYOUT "cpp" static void @@ -25,8 +29,8 @@ test(int argc, char *argv[]) UT_FATAL("!pool::create: %s %s", pe.what(), path); } - test_arith(pop); - test_relational(pop); + test_arith(pop); + test_relational(pop); pop.close(); } diff --git a/tests/ptr/pa_self_relative_ptr_atomic.hpp b/tests/ptr/pa_self_relative_ptr_atomic.hpp index 
7c75a16e21..7c80ebbc55 100644 --- a/tests/ptr/pa_self_relative_ptr_atomic.hpp +++ b/tests/ptr/pa_self_relative_ptr_atomic.hpp @@ -4,8 +4,8 @@ #include "thread_helpers.hpp" #include "unittest.hpp" -#include -#include +#include +#include #include #include @@ -15,7 +15,10 @@ constexpr size_t CONCURRENCY = 20; constexpr size_t MEAN_CONCURRENCY = CONCURRENCY * 2; constexpr size_t HIGH_CONCURRENCY = CONCURRENCY * 5; -using pmem::obj::experimental::pa_self_relative_ptr; +template +using pa_self_relative_ptr = + pmem::obj::experimental::self_relative_ptr; +using self_relative_ptr_base = pmem::obj::experimental::self_relative_ptr_base; template using atomic_type = typename std::conditional< diff --git a/tests/ptr/pa_self_relative_ptr_atomic_pmem.cpp b/tests/ptr/pa_self_relative_ptr_atomic_pmem.cpp index b12b46cc66..152618b3ab 100644 --- a/tests/ptr/pa_self_relative_ptr_atomic_pmem.cpp +++ b/tests/ptr/pa_self_relative_ptr_atomic_pmem.cpp @@ -4,8 +4,8 @@ #include "thread_helpers.hpp" #include "unittest.hpp" -#include -#include +#include +#include #include #include #include @@ -20,7 +20,8 @@ namespace nvobj = pmem::obj; template -using pa_self_relative_ptr = pmem::obj::experimental::pa_self_relative_ptr; +using pa_self_relative_ptr = + pmem::obj::experimental::self_relative_ptr; template using atomic_ptr = std::atomic>; diff --git a/tests/ptr/ptr.hpp b/tests/ptr/ptr.hpp index 5b5c777e7f..ae966006db 100644 --- a/tests/ptr/ptr.hpp +++ b/tests/ptr/ptr.hpp @@ -4,7 +4,6 @@ #include "unittest.hpp" #include -#include #include #include #include @@ -19,22 +18,27 @@ namespace nvobj = pmem::obj; namespace nvobjexp = nvobj::experimental; -template