PMDK C++ bindings 1.9.1
This is the C++ bindings documentation for PMDK's libpmemobj.
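Below is a minimal usage sketch of pmem::obj::concurrent_hash_map; it is not part of the header listed on this page. The header path, pool file path "/pmem/example.pool", layout name "example", and the p<int> key/value types are illustrative assumptions only; adapt them to your application.

// Illustrative sketch only: paths, layout name and types are assumptions.
#include <libpmemobj++/container/concurrent_hash_map.hpp>
#include <libpmemobj++/make_persistent.hpp>
#include <libpmemobj++/p.hpp>
#include <libpmemobj++/persistent_ptr.hpp>
#include <libpmemobj++/pool.hpp>
#include <libpmemobj++/transaction.hpp>

using map_type =
	pmem::obj::concurrent_hash_map<pmem::obj::p<int>, pmem::obj::p<int>>;

struct root {
	pmem::obj::persistent_ptr<map_type> map;
};

int
main()
{
	auto pop = pmem::obj::pool<root>::open("/pmem/example.pool", "example");
	auto r = pop.root();

	if (r->map == nullptr) {
		/* Allocate the map once, inside a transaction. */
		pmem::obj::transaction::run(pop, [&] {
			r->map = pmem::obj::make_persistent<map_type>();
		});
	}

	/* Must be called after each pool open, before any other operation. */
	r->map->runtime_initialize();

	{
		/* accessor keeps the element write-locked while it is alive. */
		map_type::accessor acc;
		r->map->insert(acc, map_type::value_type(42, 1));
	}

	{
		/* const_accessor keeps the element read-locked. */
		map_type::const_accessor acc;
		if (r->map->find(acc, 42)) {
			/* Read the mapped value while holding the read lock. */
			int value = acc->second.get_ro();
			(void)value;
		}
	}

	r->map->erase(42);

	pop.close();
	return 0;
}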
concurrent_hash_map.hpp
1 /*
2  * Copyright 2019-2020, Intel Corporation
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  *
8  * * Redistributions of source code must retain the above copyright
9  * notice, this list of conditions and the following disclaimer.
10  *
11  * * Redistributions in binary form must reproduce the above copyright
12  * notice, this list of conditions and the following disclaimer in
13  * the documentation and/or other materials provided with the
14  * distribution.
15  *
16  * * Neither the name of the copyright holder nor the names of its
17  * contributors may be used to endorse or promote products derived
18  * from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
39 #ifndef PMEMOBJ_CONCURRENT_HASH_MAP_HPP
40 #define PMEMOBJ_CONCURRENT_HASH_MAP_HPP
41 
44 #include <libpmemobj++/detail/pair.hpp>
46 
47 #include <libpmemobj++/defrag.hpp>
49 #include <libpmemobj++/mutex.hpp>
50 #include <libpmemobj++/p.hpp>
53 
54 #include <libpmemobj++/detail/persistent_pool_ptr.hpp>
56 
58 
59 #include <atomic>
60 #include <cassert>
61 #include <functional>
62 #include <initializer_list>
63 #include <iterator> // for std::distance
64 #include <memory>
65 #include <mutex>
66 #include <thread>
67 #include <type_traits>
68 #include <utility>
69 #include <vector>
70 
71 namespace std
72 {
76 template <typename T>
77 struct hash<pmem::obj::p<T>> {
78  size_t
79  operator()(const pmem::obj::p<T> &x) const
80  {
81  return hash<T>()(x.get_ro());
82  }
83 };
84 } /* namespace std */
85 
86 namespace pmem
87 {
88 namespace obj
89 {
90 
91 namespace concurrent_hash_map_internal
92 {
93 template <typename SharedMutexT>
94 class shared_mutex_scoped_lock {
95  using rw_mutex_type = SharedMutexT;
96 
97 public:
98  shared_mutex_scoped_lock(const shared_mutex_scoped_lock &) = delete;
99  shared_mutex_scoped_lock &
100  operator=(const shared_mutex_scoped_lock &) = delete;
101 
103  shared_mutex_scoped_lock() : mutex(nullptr), is_writer(false)
104  {
105  }
106 
108  shared_mutex_scoped_lock(rw_mutex_type &m, bool write = true)
109  : mutex(nullptr)
110  {
111  acquire(m, write);
112  }
113 
115  ~shared_mutex_scoped_lock()
116  {
117  if (mutex)
118  release();
119  }
120 
122  void
123  acquire(rw_mutex_type &m, bool write = true)
124  {
125  is_writer = write;
126  mutex = &m;
127  if (write)
128  mutex->lock();
129  else
130  mutex->lock_shared();
131  }
132 
136  void
137  release()
138  {
139  assert(mutex);
140  rw_mutex_type *m = mutex;
141  mutex = nullptr;
142  if (is_writer) {
143  m->unlock();
144  } else {
145  m->unlock_shared();
146  }
147  }
148 
153  bool
154  try_acquire(rw_mutex_type &m, bool write = true)
155  {
156  assert(!mutex);
157  bool result;
158  is_writer = write;
159  result = write ? m.try_lock() : m.try_lock_shared();
160  if (result)
161  mutex = &m;
162  return result;
163  }
164 
165 protected:
170  rw_mutex_type *mutex;
175  bool is_writer;
176 }; /* class shared_mutex_scoped_lock */
177 
178 template <typename ScopedLockType>
179 using scoped_lock_upgrade_to_writer =
180  decltype(std::declval<ScopedLockType>().upgrade_to_writer());
181 
182 template <typename ScopedLockType>
183 using scoped_lock_has_upgrade_to_writer =
184  detail::supports<ScopedLockType, scoped_lock_upgrade_to_writer>;
185 
186 template <typename ScopedLockType>
187 using scoped_lock_downgrade_to_reader =
188  decltype(std::declval<ScopedLockType>().downgrade_to_reader());
189 
190 template <typename ScopedLockType>
191 using scoped_lock_has_downgrade_to_reader =
192  detail::supports<ScopedLockType, scoped_lock_downgrade_to_reader>;
193 
194 template <typename ScopedLockType,
195  bool = scoped_lock_has_upgrade_to_writer<ScopedLockType>::value
196  &&scoped_lock_has_downgrade_to_reader<ScopedLockType>::value>
197 class scoped_lock_traits {
198 public:
199  using scope_lock_type = ScopedLockType;
200 
201  static bool
202  initial_rw_state(bool write)
203  {
204  /* For upgradeable locks, initial state is always read */
205  return false;
206  }
207 
208  static bool
209  upgrade_to_writer(scope_lock_type &lock)
210  {
211  return lock.upgrade_to_writer();
212  }
213 
214  static bool
215  downgrade_to_reader(scope_lock_type &lock)
216  {
217  return lock.downgrade_to_reader();
218  }
219 };
220 
221 template <typename ScopedLockType>
222 class scoped_lock_traits<ScopedLockType, false> {
223 public:
224  using scope_lock_type = ScopedLockType;
225 
226  static bool
227  initial_rw_state(bool write)
228  {
229  /* For non-upgradeable locks, we take the lock in the required mode
230  * immediately */
231  return write;
232  }
233 
234  static bool
235  upgrade_to_writer(scope_lock_type &lock)
236  {
237  /* This overload is for locks which do not support upgrade
238  * operation. For those locks, upgrade_to_writer should not be
239  * called when holding a read lock */
240  return true;
241  }
242 
243  static bool
244  downgrade_to_reader(scope_lock_type &lock)
245  {
246  /* This overload is for locks which do not support downgrade
247  * operation. For those locks, downgrade_to_reader should never
248  * be called */
249  assert(false);
250 
251  return false;
252  }
253 };
254 }
255 
256 template <typename Key, typename T, typename Hash = std::hash<Key>,
257  typename KeyEqual = std::equal_to<Key>,
258  typename MutexType = pmem::obj::shared_mutex,
259  typename ScopedLockType = concurrent_hash_map_internal::
260  shared_mutex_scoped_lock<MutexType>>
261 class concurrent_hash_map;
262 
264 namespace concurrent_hash_map_internal
265 {
266 /* Helper method which throws an exception when called in a tx */
267 static inline void
268 check_outside_tx()
269 {
270  if (pmemobj_tx_stage() != TX_STAGE_NONE)
272  "Function called inside transaction scope.");
273 }
274 
275 template <typename Hash>
276 using transparent_key_equal = typename Hash::transparent_key_equal;
277 
278 template <typename Hash>
279 using has_transparent_key_equal = detail::supports<Hash, transparent_key_equal>;
280 
281 template <typename Hash, typename Pred,
282  bool = has_transparent_key_equal<Hash>::value>
283 struct key_equal_type {
284  using type = typename Hash::transparent_key_equal;
285 };
286 
287 template <typename Hash, typename Pred>
288 struct key_equal_type<Hash, Pred, false> {
289  using type = Pred;
290 };
291 
292 template <typename Mutex, typename ScopedLockType>
293 void
294 assert_not_locked(Mutex &mtx)
295 {
296 #ifndef NDEBUG
297  ScopedLockType scoped_lock;
298  assert(scoped_lock.try_acquire(mtx));
299  scoped_lock.release();
300 #else
301  (void)mtx;
302 #endif
303 }
304 
305 template <typename Key, typename T, typename MutexType, typename ScopedLockType>
306 struct hash_map_node {
308  using mutex_t = MutexType;
309 
311  using scoped_t = ScopedLockType;
312 
313  using value_type = detail::pair<const Key, T>;
314 
316  using node_ptr_t = detail::persistent_pool_ptr<
317  hash_map_node<Key, T, mutex_t, scoped_t>>;
318 
320  node_ptr_t next;
321 
323  mutex_t mutex;
324 
326  value_type item;
327 
328  hash_map_node(const node_ptr_t &_next, const Key &key)
329  : next(_next),
330  item(std::piecewise_construct, std::forward_as_tuple(key),
331  std::forward_as_tuple())
332  {
333  }
334 
335  hash_map_node(const node_ptr_t &_next, const Key &key, const T &t)
336  : next(_next), item(key, t)
337  {
338  }
339 
340  hash_map_node(const node_ptr_t &_next, value_type &&i)
341  : next(_next), item(std::move(i))
342  {
343  }
344 
345  template <typename... Args>
346  hash_map_node(const node_ptr_t &_next, Args &&... args)
347  : next(_next), item(std::forward<Args>(args)...)
348  {
349  }
350 
351  hash_map_node(const node_ptr_t &_next, const value_type &i)
352  : next(_next), item(i)
353  {
354  }
355 
357  hash_map_node(const hash_map_node &) = delete;
358 
360  hash_map_node &operator=(const hash_map_node &) = delete;
361 }; /* struct node */
362 
367 template <typename Bucket>
368 class segment_traits {
369 public:
371  using segment_index_t = size_t;
372  using size_type = size_t;
373  using bucket_type = Bucket;
374 
375 protected:
377  constexpr static size_type max_allocation_size = PMEMOBJ_MAX_ALLOC_SIZE;
378 
380  constexpr static segment_index_t first_big_block = 27;
381  /* TODO: avoid hardcoded value; need constexpr similar to:
382  * Log2(max_allocation_size / sizeof(bucket_type)) */
383 
385  constexpr static size_type big_block_size = size_type(1)
386  << first_big_block;
387 
388  /* Block size in bytes cannot exceed max_allocation_size */
389  static_assert((big_block_size * sizeof(bucket_type)) <
390  max_allocation_size,
391  "Block size exceeds max_allocation_size");
392 
394  constexpr static segment_index_t
395  first_block_in_segment(segment_index_t seg)
396  {
397  return seg < first_big_block
398  ? seg
399  : (first_big_block +
400  (segment_index_t(1) << (seg - first_big_block)) - 1);
401  }
402 
404  constexpr static size_type
405  blocks_in_segment(segment_index_t seg)
406  {
407  return seg < first_big_block
408  ? segment_index_t(1)
409  : segment_index_t(1) << (seg - first_big_block);
410  }
411 
413  constexpr static size_type
414  block_size(segment_index_t b)
415  {
416  return b < first_big_block ? segment_size(b ? b : 1)
417  : big_block_size;
418  }
419 
420 public:
422  constexpr static segment_index_t embedded_segments = 1;
423 
425  constexpr static size_type embedded_buckets = 1 << embedded_segments;
426 
428  constexpr static segment_index_t number_of_segments = 32;
429 
431  static const size_type first_block = 8;
432 
434  constexpr static segment_index_t
435  number_of_blocks()
436  {
437  return first_block_in_segment(number_of_segments);
438  }
439 
441  static segment_index_t
442  segment_index_of(size_type index)
443  {
444  return segment_index_t(detail::Log2(index | 1));
445  }
446 
448  constexpr static segment_index_t
449  segment_base(segment_index_t k)
450  {
451  return (segment_index_t(1) << k) & ~segment_index_t(1);
452  }
453 
455  constexpr static size_type
456  segment_size(segment_index_t k)
457  {
458  return size_type(1) << k; // fake value for k == 0
459  }
460  static_assert(
461  embedded_segments < first_big_block,
462  "Number of embedded segments cannot exceed max_allocation_size");
463 }; /* End of class segment_traits */
464 
481 template <typename BlockTable, typename SegmentTraits, bool is_const>
482 class segment_facade_impl : public SegmentTraits {
483 private:
484  using traits_type = SegmentTraits;
485  using traits_type::block_size;
486  using traits_type::blocks_in_segment;
487  using traits_type::embedded_buckets;
488  using traits_type::embedded_segments;
489  using traits_type::first_block;
490  using traits_type::first_block_in_segment;
491  using traits_type::segment_base;
492  using traits_type::segment_size;
493 
494 public:
495  using table_reference =
496  typename std::conditional<is_const, const BlockTable &,
497  BlockTable &>::type;
498 
499  using table_pointer =
500  typename std::conditional<is_const, const BlockTable *,
501  BlockTable *>::type;
502 
503  using bucket_type = typename traits_type::bucket_type;
504  using segment_index_t = typename traits_type::segment_index_t;
505  using size_type = typename traits_type::size_type;
506 
508  segment_facade_impl(table_reference table, segment_index_t s)
509  : my_table(&table), my_seg(s)
510  {
511  assert(my_seg < traits_type::number_of_segments);
512  }
513 
515  segment_facade_impl(const segment_facade_impl &src)
516  : my_table(src.my_table), my_seg(src.my_seg)
517  {
518  }
519 
520  segment_facade_impl(segment_facade_impl &&src) = default;
521 
523  segment_facade_impl &
524  operator=(const segment_facade_impl &src)
525  {
526  my_table = src.my_table;
527  my_seg = src.my_seg;
528  return *this;
529  }
530 
532  segment_facade_impl &
533  operator=(segment_facade_impl &&src)
534  {
535  my_table = src.my_table;
536  my_seg = src.my_seg;
537  return *this;
538  }
539 
546  bucket_type &operator[](size_type i) const
547  {
548  assert(i < size());
549 
550  segment_index_t table_block = first_block_in_segment(my_seg);
551  size_type b_size = block_size(table_block);
552 
553  table_block += i / b_size;
554  i = i % b_size;
555 
556  return (*my_table)[table_block][static_cast<std::ptrdiff_t>(i)];
557  }
558 
562  segment_facade_impl &
563  operator++()
564  {
565  ++my_seg;
566  return *this;
567  }
568 
572  segment_facade_impl
573  operator++(int)
574  {
575  segment_facade_impl tmp = *this;
576  ++(*this);
577  return tmp;
578  }
579 
583  segment_facade_impl &
584  operator--()
585  {
586  --my_seg;
587  return *this;
588  }
589 
593  segment_facade_impl
594  operator--(int)
595  {
596  segment_facade_impl tmp = *this;
597  --(*this);
598  return tmp;
599  }
600 
604  segment_facade_impl &
605  operator+=(segment_index_t off)
606  {
607  my_seg += off;
608  return *this;
609  }
610 
614  segment_facade_impl &
615  operator-=(segment_index_t off)
616  {
617  my_seg -= off;
618  return *this;
619  }
620 
624  segment_facade_impl
625  operator+(segment_index_t off) const
626  {
627  return segment_facade_impl(*(this->my_table),
628  this->my_seg + off);
629  }
630 
634  segment_facade_impl
635  operator-(segment_index_t off) const
636  {
637  return segment_facade_impl(*(this->my_table),
638  this->my_seg - off);
639  }
640 
644  void
645  enable(pool_base &pop)
646  {
647  assert(my_seg >= embedded_segments);
648 
649  if (my_seg < first_block) {
650  enable_first_block(pop);
651  } else {
652  enable_big_segment(pop);
653  }
654  }
655 
659  void
660  disable()
661  {
662  assert(my_seg >= embedded_segments);
663 
664  if (my_seg < first_block) {
665  if (my_seg == embedded_segments) {
666  size_type sz = segment_size(first_block) -
667  embedded_buckets;
668  delete_persistent<bucket_type[]>(
669  (*my_table)[my_seg], sz);
670  }
671  (*my_table)[my_seg] = nullptr;
672  } else {
673  block_range blocks = segment_blocks(my_seg);
674 
675  for (segment_index_t b = blocks.first;
676  b < blocks.second; ++b) {
677  if ((*my_table)[b] != nullptr) {
678  delete_persistent<bucket_type[]>(
679  (*my_table)[b], block_size(b));
680  (*my_table)[b] = nullptr;
681  }
682  }
683  }
684  }
685 
689  constexpr size_type
690  size() const
691  {
692  return segment_size(my_seg ? my_seg : 1);
693  }
694 
700  bool
701  is_valid() const
702  {
703  block_range blocks = segment_blocks(my_seg);
704 
705  for (segment_index_t b = blocks.first; b < blocks.second; ++b) {
706  if ((*my_table)[b] == nullptr)
707  return false;
708  }
709 
710  return true;
711  }
712 
713 private:
714  using block_range = std::pair<segment_index_t, segment_index_t>;
715 
719  static block_range
720  segment_blocks(segment_index_t seg)
721  {
722  segment_index_t begin = first_block_in_segment(seg);
723 
724  return block_range(begin, begin + blocks_in_segment(seg));
725  }
726 
727  void
728  enable_first_block(pool_base &pop)
729  {
730  assert(my_seg == embedded_segments);
731  {
732  transaction::manual tx(pop);
733 
734  size_type sz =
735  segment_size(first_block) - embedded_buckets;
736  (*my_table)[my_seg] =
737  make_persistent<bucket_type[]>(sz);
738 
739  persistent_ptr<bucket_type> base =
740  (*my_table)[embedded_segments].raw();
741 
742  for (segment_index_t s = my_seg + 1; s < first_block;
743  ++s) {
744  std::ptrdiff_t off =
745  static_cast<std::ptrdiff_t>(
746  segment_base(s) -
747  segment_base(my_seg));
748 
749  (*my_table)[s] = (base + off).raw();
750  }
751 
752  transaction::commit();
753  }
754  }
755 
756  void
757  enable_big_segment(pool_base &pop)
758  {
759  block_range blocks = segment_blocks(my_seg);
760  {
761  transaction::manual tx(pop);
762 
763  for (segment_index_t b = blocks.first;
764  b < blocks.second; ++b) {
765  assert((*my_table)[b] == nullptr);
766  (*my_table)[b] = make_persistent<bucket_type[]>(
767  block_size(b));
768  }
769 
770  transaction::commit();
771  }
772  }
773 
775  table_pointer my_table;
776 
778  segment_index_t my_seg;
779 }; /* End of class segment_facade_impl */
780 
787 template <typename Key, typename T, typename MutexType, typename ScopedLockType>
788 class hash_map_base {
789 public:
790  using mutex_t = MutexType;
791  using scoped_t = ScopedLockType;
792 
794  using size_type = size_t;
795 
797  using hashcode_type = size_t;
798 
800  using node = hash_map_node<Key, T, mutex_t, scoped_t>;
801 
803  using node_ptr_t = detail::persistent_pool_ptr<node>;
804 
806  struct bucket {
807  using mutex_t = MutexType;
808  using scoped_t = ScopedLockType;
809 
811  mutex_t mutex;
812 
814  p<std::atomic<uint64_t>> rehashed;
815 
817  node_ptr_t node_list;
818 
820  bucket() : node_list(nullptr)
821  {
822 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
823  VALGRIND_HG_DISABLE_CHECKING(&rehashed,
824  sizeof(rehashed));
825 #endif
826  rehashed.get_rw() = false;
827  }
828 
834  bool
835  is_rehashed(std::memory_order order)
836  {
837  return rehashed.get_ro().load(order);
838  }
839 
840  void
841  set_rehashed(std::memory_order order)
842  {
843  rehashed.get_rw().store(true, order);
844  }
845 
847  bucket(const bucket &) = delete;
848 
850  bucket &operator=(const bucket &) = delete;
851  }; /* End of struct bucket */
852 
854  using segment_traits_t = segment_traits<bucket>;
855 
857  using segment_index_t = typename segment_traits_t::segment_index_t;
858 
860  static const size_type embedded_buckets =
861  segment_traits_t::embedded_buckets;
862 
864  static const size_type first_block = segment_traits_t::first_block;
865 
867  constexpr static size_type block_table_size =
868  segment_traits_t::number_of_blocks();
869 
871  using segment_ptr_t = persistent_ptr<bucket[]>;
872 
874  using bucket_ptr_t = persistent_ptr<bucket>;
875 
877  using blocks_table_t = segment_ptr_t[block_table_size];
878 
880  using segment_enable_mutex_t = pmem::obj::mutex;
881 
883  struct tls_data_t {
884  p<int64_t> size_diff = 0;
885  std::aligned_storage<56, 8> padding;
886  };
887 
888  using tls_t = detail::enumerable_thread_specific<tls_data_t>;
889 
890  enum feature_flags : uint32_t { FEATURE_CONSISTENT_SIZE = 1 };
891 
893  struct features {
894  p<uint32_t> compat;
895  p<uint32_t> incompat;
896  };
897 
898  /* --------------------------------------------------------- */
899 
901  p<uint64_t> my_pool_uuid;
902 
905  features layout_features;
906 
909  std::aligned_storage<sizeof(size_t), sizeof(size_t)>::type
910  my_mask_reserved;
911 
913  /* my_mask is always restored on restart. */
914  std::atomic<hashcode_type> my_mask;
915 
916  /* Size of value (key and value pair) stored in a pool */
917  std::size_t value_size;
918 
920  std::aligned_storage<24, 8>::type padding1;
921 
926  blocks_table_t my_table;
927 
928  /* It must be in a separate cache line from my_mask for
929  * performance reasons */
931  std::atomic<size_type> my_size;
932 
934  std::aligned_storage<24, 8>::type padding2;
935 
937  persistent_ptr<tls_t> tls_ptr;
938 
944  p<size_t> on_init_size;
945 
947  std::aligned_storage<40, 8>::type reserved;
948 
950  segment_enable_mutex_t my_segment_enable_mutex;
951 
953  bucket my_embedded_segment[embedded_buckets];
954 
955  /* --------------------------------------------------------- */
956 
958  static constexpr features
959  header_features()
960  {
961  return {FEATURE_CONSISTENT_SIZE, 0};
962  }
963 
964  const std::atomic<hashcode_type> &
965  mask() const noexcept
966  {
967  return my_mask;
968  }
969 
970  std::atomic<hashcode_type> &
971  mask() noexcept
972  {
973  return my_mask;
974  }
975 
976  size_t
977  size() const
978  {
979  return my_size.load(std::memory_order_relaxed);
980  }
981 
982  p<int64_t> &
983  thread_size_diff()
984  {
985  assert(this->tls_ptr != nullptr);
986  return this->tls_ptr->local().size_diff;
987  }
988 
990  void
991  tls_restore()
992  {
993  assert(this->tls_ptr != nullptr);
994 
995  pool_base pop = pool_base{pmemobj_pool_by_ptr(this)};
996 
997  int64_t last_run_size = 0;
998  for (auto &data : *tls_ptr)
999  last_run_size += data.size_diff;
1000 
1001  /* Make sure that on_init_size + last_run_size >= 0 */
1002  assert(last_run_size >= 0 ||
1003  static_cast<int64_t>(static_cast<size_t>(last_run_size) +
1004  on_init_size) >= 0);
1005 
1006  transaction::run(pop, [&] {
1007  on_init_size += static_cast<size_t>(last_run_size);
1008  tls_ptr->clear();
1009  });
1010 
1011  this->my_size = on_init_size;
1012  }
1013 
1015  using const_segment_facade_t =
1016  segment_facade_impl<blocks_table_t, segment_traits_t, true>;
1017 
1019  using segment_facade_t =
1020  segment_facade_impl<blocks_table_t, segment_traits_t, false>;
1021 
1023  hash_map_base()
1024  {
1025  static_assert(
1026  sizeof(size_type) == sizeof(std::atomic<size_type>),
1027  "std::atomic should have the same layout as underlying integral type");
1028 
1029 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
1030  VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
1031 #endif
1032  layout_features = {0, 0};
1033 
1034  PMEMoid oid = pmemobj_oid(this);
1035 
1036  assert(!OID_IS_NULL(oid));
1037 
1038  my_pool_uuid = oid.pool_uuid_lo;
1039 
1040  pool_base pop = get_pool_base();
1041  /* enable embedded segments */
1042  for (size_type i = 0; i < segment_traits_t::embedded_segments;
1043  ++i) {
1044  my_table[i] =
1045  pmemobj_oid(my_embedded_segment +
1046  segment_traits_t::segment_base(i));
1047  segment_facade_t seg(my_table, i);
1048  mark_rehashed<false>(pop, seg);
1049  }
1050 
1051  on_init_size = 0;
1052 
1053  value_size = 0;
1054 
1055  this->tls_ptr = nullptr;
1056  }
1057 
1058  /*
1059  * Should be called before the concurrent_hash_map destructor is called.
1060  * Otherwise, the program can terminate if an exception occurs while freeing
1061  * memory inside the dtor.
1062  */
1063  void
1064  free_tls()
1065  {
1066  auto pop = get_pool_base();
1067 
1068  if ((layout_features.compat & FEATURE_CONSISTENT_SIZE) &&
1069  tls_ptr) {
1070  transaction::run(pop, [&] {
1071  delete_persistent<tls_t>(tls_ptr);
1072  tls_ptr = nullptr;
1073  });
1074  }
1075  }
1076 
1080  void
1081  calculate_mask()
1082  {
1083 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
1084  VALGRIND_HG_DISABLE_CHECKING(&my_size, sizeof(my_size));
1085  VALGRIND_HG_DISABLE_CHECKING(&my_mask, sizeof(my_mask));
1086 #endif
1087 #if LIBPMEMOBJ_CPP_VG_PMEMCHECK_ENABLED
1088  VALGRIND_PMC_REMOVE_PMEM_MAPPING(&my_size, sizeof(my_size));
1089  VALGRIND_PMC_REMOVE_PMEM_MAPPING(&my_mask, sizeof(my_mask));
1090 #endif
1091 
1092  hashcode_type m = embedded_buckets - 1;
1093 
1094  const_segment_facade_t segment(
1095  my_table, segment_traits_t::embedded_segments);
1096 
1097  while (segment.is_valid()) {
1098  m += segment.size();
1099  ++segment;
1100  }
1101 
1102  mask().store(m, std::memory_order_relaxed);
1103  }
1104 
1108  template <bool Flush = true>
1109  void
1110  mark_rehashed(pool_base &pop, segment_facade_t &segment)
1111  {
1112  for (size_type i = 0; i < segment.size(); ++i) {
1113  bucket *b = &(segment[i]);
1114 
1115  assert_not_locked<mutex_t, scoped_t>(b->mutex);
1116 
1117  b->set_rehashed(std::memory_order_relaxed);
1118  }
1119 
1120  if (Flush) {
1121  /* Flush in separate loop to avoid read-after-flush */
1122  for (size_type i = 0; i < segment.size(); ++i) {
1123  bucket *b = &(segment[i]);
1124  pop.flush(b->rehashed);
1125  }
1126 
1127  pop.drain();
1128  }
1129  }
1130 
1134  void
1135  enable_segment(segment_index_t k, bool is_initial = false)
1136  {
1137  assert(k);
1138 
1139  pool_base pop = get_pool_base();
1140  size_type sz;
1141 
1142  if (k >= first_block) {
1143  segment_facade_t new_segment(my_table, k);
1144 
1145  sz = new_segment.size();
1146  if (!new_segment.is_valid())
1147  new_segment.enable(pop);
1148 
1149  if (is_initial) {
1150  mark_rehashed(pop, new_segment);
1151  }
1152 
1153  /* double it to get entire capacity of the container */
1154  sz <<= 1;
1155  } else {
1156  /* the first block */
1157  assert(k == segment_traits_t::embedded_segments);
1158 
1159  for (segment_index_t i = k; i < first_block; ++i) {
1160  segment_facade_t new_segment(my_table, i);
1161 
1162  if (!new_segment.is_valid())
1163  new_segment.enable(pop);
1164 
1165  if (is_initial) {
1166  mark_rehashed(pop, new_segment);
1167  }
1168  }
1169 
1170  sz = segment_traits_t::segment_size(first_block);
1171  }
1172 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
1173  ANNOTATE_HAPPENS_BEFORE(&my_mask);
1174 #endif
1175  mask().store(sz - 1, std::memory_order_release);
1176  }
1177 
1182  bucket *
1183  get_bucket(hashcode_type h) const
1184  {
1185  segment_index_t s = segment_traits_t::segment_index_of(h);
1186 
1187  h -= segment_traits_t::segment_base(s);
1188 
1189  const_segment_facade_t segment(my_table, s);
1190 
1191  assert(segment.is_valid());
1192 
1193  return &(segment[h]);
1194  }
1195 
1199  inline bool
1200  check_mask_race(hashcode_type h, hashcode_type &m) const
1201  {
1202  hashcode_type m_now, m_old = m;
1203 
1204  m_now = mask().load(std::memory_order_acquire);
1205 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
1206  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
1207 #endif
1208 
1209  if (m_old != m_now)
1210  return check_rehashing_collision(h, m_old, m = m_now);
1211 
1212  return false;
1213  }
1214 
1218  bool
1219  check_rehashing_collision(hashcode_type h, hashcode_type m_old,
1220  hashcode_type m) const
1221  {
1222  assert(m_old != m);
1223 
1224  if ((h & m_old) != (h & m)) {
1225  /* mask changed for this hashcode (a rare event); the
1226  * condition above proves that 'h' has some other bits set
1227  * besides 'm_old'; find the next applicable mask after m_old */
1228 
1229  for (++m_old; !(h & m_old); m_old <<= 1)
1230  ;
1231 
1232  m_old = (m_old << 1) - 1; /* get full mask from a bit */
1233 
1234  assert((m_old & (m_old + 1)) == 0 && m_old <= m);
1235 
1236  /* check whether it is rehashing/ed */
1237  bucket *b = get_bucket(h & m_old);
1238  return b->is_rehashed(std::memory_order_acquire);
1239  }
1240 
1241  return false;
1242  }
1243 
1248  template <typename Node, typename... Args>
1249  void
1250  insert_new_node_internal(bucket *b,
1251  detail::persistent_pool_ptr<Node> &new_node,
1252  Args &&... args)
1253  {
1254  assert(pmemobj_tx_stage() == TX_STAGE_WORK);
1255 
1256  new_node = pmem::obj::make_persistent<Node>(
1257  b->node_list, std::forward<Args>(args)...);
1258  b->node_list = new_node; /* bucket is locked */
1259  }
1260 
1265  template <typename Node, typename... Args>
1266  size_type
1267  insert_new_node(bucket *b, detail::persistent_pool_ptr<Node> &new_node,
1268  Args &&... args)
1269  {
1270  pool_base pop = get_pool_base();
1271 
1272  /*
1273  * This is only true when called from single-threaded methods
1274  * like swap() or operator=. In that case it's safe to directly
1275  * modify on_init_size.
1276  */
1277  if (pmemobj_tx_stage() == TX_STAGE_WORK) {
1278  insert_new_node_internal(b, new_node,
1279  std::forward<Args>(args)...);
1280  this->on_init_size++;
1281  } else {
1282  auto &size_diff = thread_size_diff();
1283 
1284  pmem::obj::transaction::run(pop, [&] {
1285  insert_new_node_internal(
1286  b, new_node,
1287  std::forward<Args>(args)...);
1288  ++size_diff;
1289  });
1290  }
1291 
1292  /* Increment volatile size */
1293  return ++(this->my_size);
1294  }
1295 
1300  bool
1301  check_growth(hashcode_type m, size_type sz)
1302  {
1303  if (sz >= m) {
1304  segment_index_t new_seg =
1305  static_cast<segment_index_t>(detail::Log2(
1306  m +
1307  1)); /* optimized segment_index_of */
1308 
1309  assert(segment_facade_t(my_table, new_seg - 1)
1310  .is_valid());
1311 
1312  std::unique_lock<segment_enable_mutex_t> lock(
1313  my_segment_enable_mutex, std::try_to_lock);
1314 
1315  if (lock) {
1316  if (mask().load(std::memory_order_relaxed) ==
1317  m) {
1318  /* Otherwise, another thread has already
1319  * enabled this segment */
1320  enable_segment(new_seg);
1321 
1322  return true;
1323  }
1324  }
1325  }
1326 
1327  return false;
1328  }
1329 
1333  void
1334  reserve(size_type buckets)
1335  {
1336  if (buckets == 0)
1337  return;
1338 
1339  --buckets;
1340 
1341  bool is_initial = this->size() == 0;
1342 
1343  for (size_type m = mask(); buckets > m; m = mask())
1344  enable_segment(
1345  segment_traits_t::segment_index_of(m + 1),
1346  is_initial);
1347  }
1348 
1353  void
1354  internal_swap(hash_map_base<Key, T, mutex_t, scoped_t> &table)
1355  {
1356  pool_base p = get_pool_base();
1357  {
1358  transaction::manual tx(p);
1359 
1360  this->my_pool_uuid.swap(table.my_pool_uuid);
1361 
1362  /*
1363  * As internal_swap can only be called
1364  * from one thread, and there can be an outer
1365  * transaction, we must make sure that mask and size
1366  * changes are transactional
1367  */
1368  transaction::snapshot((size_t *)&this->my_mask);
1369  transaction::snapshot((size_t *)&this->my_size);
1370 
1371  this->mask() = table.mask().exchange(
1372  this->mask(), std::memory_order_relaxed);
1373 
1374  this->my_size = table.my_size.exchange(
1375  this->my_size, std::memory_order_relaxed);
1376 
1377  /* Swap consistent size */
1378  std::swap(this->tls_ptr, table.tls_ptr);
1379 
1380  for (size_type i = 0; i < embedded_buckets; ++i)
1381  this->my_embedded_segment[i].node_list.swap(
1382  table.my_embedded_segment[i].node_list);
1383 
1384  for (size_type i = segment_traits_t::embedded_segments;
1385  i < block_table_size; ++i)
1386  this->my_table[i].swap(table.my_table[i]);
1387 
1388  transaction::commit();
1389  }
1390  }
1391 
1396  pool_base
1397  get_pool_base()
1398  {
1399  PMEMobjpool *pop =
1400  pmemobj_pool_by_oid(PMEMoid{my_pool_uuid, 0});
1401 
1402  return pool_base(pop);
1403  }
1404 }; /* End of class hash_map_base */
1405 
1411 template <typename Container, bool is_const>
1412 class hash_map_iterator {
1413 public:
1414  using iterator_category = std::forward_iterator_tag;
1415  using difference_type = ptrdiff_t;
1416  using map_type = Container;
1417  using value_type = typename map_type::value_type;
1418  using node = typename map_type::node;
1419  using bucket = typename map_type::bucket;
1420  using map_ptr = typename std::conditional<is_const, const map_type *,
1421  map_type *>::type;
1422  using reference =
1423  typename std::conditional<is_const,
1424  typename map_type::const_reference,
1425  typename map_type::reference>::type;
1426  using pointer =
1427  typename std::conditional<is_const,
1428  typename map_type::const_pointer,
1429  typename map_type::pointer>::type;
1430 
1431  template <typename C, bool M, bool U>
1432  friend bool operator==(const hash_map_iterator<C, M> &i,
1433  const hash_map_iterator<C, U> &j);
1434 
1435  template <typename C, bool M, bool U>
1436  friend bool operator!=(const hash_map_iterator<C, M> &i,
1437  const hash_map_iterator<C, U> &j);
1438 
1439  friend class hash_map_iterator<map_type, true>;
1440 
1441 #if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
1442 private:
1443  template <typename Key, typename T, typename Hash, typename KeyEqual,
1444  typename MutexType, typename ScopedLockType>
1445  friend class ::pmem::obj::concurrent_hash_map;
1446 #else
1447 public: /* workaround */
1448 #endif
1449  hash_map_iterator(map_ptr map, size_t index)
1450  : my_map(map), my_index(index), my_bucket(nullptr), my_node(nullptr)
1451  {
1452  if (my_index <= my_map->mask()) {
1453  bucket_accessor acc(my_map, my_index);
1454  my_bucket = acc.get();
1455  my_node = static_cast<node *>(
1456  my_bucket->node_list.get(my_map->my_pool_uuid));
1457 
1458  if (!my_node) {
1459  advance_to_next_bucket();
1460  }
1461  }
1462  }
1463 
1464 public:
1466  hash_map_iterator() = default;
1467 
1469  hash_map_iterator(const hash_map_iterator &other)
1470  : my_map(other.my_map),
1471  my_index(other.my_index),
1472  my_bucket(other.my_bucket),
1473  my_node(other.my_node)
1474  {
1475  }
1476 
1478  template <typename U = void,
1479  typename = typename std::enable_if<is_const, U>::type>
1480  hash_map_iterator(const hash_map_iterator<map_type, false> &other)
1481  : my_map(other.my_map),
1482  my_index(other.my_index),
1483  my_bucket(other.my_bucket),
1484  my_node(other.my_node)
1485  {
1486  }
1487 
1488  hash_map_iterator &operator=(const hash_map_iterator &it) = default;
1489 
1491  reference operator*() const
1492  {
1493  assert(my_node);
1494  return my_node->item;
1495  }
1496 
1498  pointer operator->() const
1499  {
1500  return &operator*();
1501  }
1502 
1504  hash_map_iterator &
1505  operator++()
1506  {
1507  my_node = static_cast<node *>(
1508  my_node->next.get((my_map->my_pool_uuid)));
1509 
1510  if (!my_node)
1511  advance_to_next_bucket();
1512 
1513  return *this;
1514  }
1515 
1517  hash_map_iterator
1518  operator++(int)
1519  {
1520  hash_map_iterator old(*this);
1521  operator++();
1522  return old;
1523  }
1524 
1525 private:
1527  map_ptr my_map = nullptr;
1528 
1530  size_t my_index = 0;
1531 
1533  bucket *my_bucket = nullptr;
1534 
1536  node *my_node = nullptr;
1537 
1538  class bucket_accessor {
1539  public:
1540  bucket_accessor(map_ptr m, size_t index)
1541  {
1542  my_bucket = m->get_bucket(index);
1543  }
1544 
1545  bucket *
1546  get() const
1547  {
1548  return my_bucket;
1549  }
1550 
1551  private:
1552  bucket *my_bucket;
1553  };
1554 
1555  void
1556  advance_to_next_bucket()
1557  {
1558  size_t k = my_index + 1;
1559 
1560  assert(my_bucket);
1561 
1562  while (k <= my_map->mask()) {
1563  bucket_accessor acc(my_map, k);
1564  my_bucket = acc.get();
1565 
1566  if (my_bucket->node_list) {
1567  my_node = static_cast<node *>(
1568  my_bucket->node_list.get(
1569  my_map->my_pool_uuid));
1570 
1571  my_index = k;
1572 
1573  return;
1574  }
1575 
1576  ++k;
1577  }
1578 
1579  my_bucket = 0;
1580  my_node = 0;
1581  my_index = k;
1582  }
1583 };
1584 
1585 template <typename Container, bool M, bool U>
1586 bool
1587 operator==(const hash_map_iterator<Container, M> &i,
1588  const hash_map_iterator<Container, U> &j)
1589 {
1590  return i.my_node == j.my_node && i.my_map == j.my_map;
1591 }
1592 
1593 template <typename Container, bool M, bool U>
1594 bool
1595 operator!=(const hash_map_iterator<Container, M> &i,
1596  const hash_map_iterator<Container, U> &j)
1597 {
1598  return i.my_node != j.my_node || i.my_map != j.my_map;
1599 }
1600 } /* namespace concurrent_hash_map_internal */
1649 template <typename Key, typename T, typename Hash, typename KeyEqual,
1650  typename MutexType, typename ScopedLockType>
1651 class concurrent_hash_map
1652  : protected concurrent_hash_map_internal::hash_map_base<Key, T, MutexType,
1653  ScopedLockType> {
1654  template <typename Container, bool is_const>
1655  friend class concurrent_hash_map_internal::hash_map_iterator;
1656 
1657 public:
1658  using size_type = typename concurrent_hash_map_internal::hash_map_base<
1659  Key, T, MutexType, ScopedLockType>::size_type;
1660  using hashcode_type =
1661  typename concurrent_hash_map_internal::hash_map_base<
1662  Key, T, MutexType, ScopedLockType>::hashcode_type;
1663  using key_type = Key;
1664  using mapped_type = T;
1665  using value_type = typename concurrent_hash_map_internal::hash_map_base<
1666  Key, T, MutexType, ScopedLockType>::node::value_type;
1667  using difference_type = ptrdiff_t;
1668  using pointer = value_type *;
1669  using const_pointer = const value_type *;
1670  using reference = value_type &;
1671  using const_reference = const value_type &;
1672  using iterator = concurrent_hash_map_internal::hash_map_iterator<
1673  concurrent_hash_map, false>;
1674  using const_iterator = concurrent_hash_map_internal::hash_map_iterator<
1675  concurrent_hash_map, true>;
1676  using hasher = Hash;
1677  using key_equal = typename concurrent_hash_map_internal::key_equal_type<
1678  Hash, KeyEqual>::type;
1679 
1680 protected:
1681  using mutex_t = MutexType;
1682  using scoped_t = ScopedLockType;
1683  /*
1684  * Explicitly use methods and types from template base class
1685  */
1686  using hash_map_base =
1687  concurrent_hash_map_internal::hash_map_base<Key, T, mutex_t,
1688  scoped_t>;
1689  using hash_map_base::calculate_mask;
1690  using hash_map_base::check_growth;
1691  using hash_map_base::check_mask_race;
1692  using hash_map_base::embedded_buckets;
1693  using hash_map_base::FEATURE_CONSISTENT_SIZE;
1694  using hash_map_base::get_bucket;
1695  using hash_map_base::get_pool_base;
1696  using hash_map_base::header_features;
1697  using hash_map_base::insert_new_node;
1698  using hash_map_base::internal_swap;
1699  using hash_map_base::layout_features;
1700  using hash_map_base::mask;
1701  using hash_map_base::reserve;
1702  using tls_t = typename hash_map_base::tls_t;
1703  using node = typename hash_map_base::node;
1704  using node_mutex_t = typename node::mutex_t;
1705  using node_ptr_t = typename hash_map_base::node_ptr_t;
1706  using bucket = typename hash_map_base::bucket;
1707  using bucket_lock_type = typename bucket::scoped_t;
1708  using segment_index_t = typename hash_map_base::segment_index_t;
1709  using segment_traits_t = typename hash_map_base::segment_traits_t;
1710  using segment_facade_t = typename hash_map_base::segment_facade_t;
1711  using scoped_lock_traits_type =
1712  concurrent_hash_map_internal::scoped_lock_traits<scoped_t>;
1713 
1714  friend class const_accessor;
1715  using persistent_node_ptr_t = detail::persistent_pool_ptr<node>;
1716 
1717  void
1718  delete_node(const node_ptr_t &n)
1719  {
1720  delete_persistent<node>(
1721  detail::static_persistent_pool_pointer_cast<node>(n)
1722  .get_persistent_ptr(this->my_pool_uuid));
1723  }
1724 
1725  template <typename K>
1726  persistent_node_ptr_t
1727  search_bucket(const K &key, bucket *b) const
1728  {
1729  assert(b->is_rehashed(std::memory_order_relaxed));
1730 
1731  persistent_node_ptr_t n =
1732  detail::static_persistent_pool_pointer_cast<node>(
1733  b->node_list);
1734 
1735  while (n &&
1736  !key_equal{}(key,
1737  n.get(this->my_pool_uuid)->item.first)) {
1738  n = detail::static_persistent_pool_pointer_cast<node>(
1739  n.get(this->my_pool_uuid)->next);
1740  }
1741 
1742  return n;
1743  }
1744 
1749  class bucket_accessor : public bucket_lock_type {
1750  bucket *my_b;
1751 
1752  public:
1753  bucket_accessor(bucket_accessor &&b) noexcept : my_b(b.my_b)
1754  {
1755  bucket_lock_type::mutex = b.bucket_lock_type::mutex;
1756  bucket_lock_type::is_writer =
1757  b.bucket_lock_type::is_writer;
1758  b.my_b = nullptr;
1759  b.bucket_lock_type::mutex = nullptr;
1760  b.bucket_lock_type::is_writer = false;
1761  }
1762 
1763  bucket_accessor(concurrent_hash_map *base,
1764  const hashcode_type h, bool writer = false)
1765  {
1766  acquire(base, h, writer);
1767  }
1768 
1773  inline void
1774  acquire(concurrent_hash_map *base, const hashcode_type h,
1775  bool writer = false)
1776  {
1777  my_b = base->get_bucket(h);
1778 
1779  if (my_b->is_rehashed(std::memory_order_acquire) ==
1780  false &&
1781  bucket_lock_type::try_acquire(this->my_b->mutex,
1782  /*write=*/true)) {
1783  if (my_b->is_rehashed(
1784  std::memory_order_relaxed) ==
1785  false) {
1786  /* recursive rehashing */
1787  base->rehash_bucket<false>(my_b, h);
1788  }
1789  } else {
1790  bucket_lock_type::acquire(my_b->mutex, writer);
1791  }
1792 
1793  assert(my_b->is_rehashed(std::memory_order_relaxed));
1794  }
1795 
1799  bool
1800  is_writer() const
1801  {
1802  return bucket_lock_type::is_writer;
1803  }
1804 
1809  bucket *
1810  get() const
1811  {
1812  return my_b;
1813  }
1814 
1819  bucket *operator->() const
1820  {
1821  return this->get();
1822  }
1823  };
1824 
1828  class serial_bucket_accessor {
1829  bucket *my_b;
1830 
1831  public:
1832  serial_bucket_accessor(concurrent_hash_map *base,
1833  const hashcode_type h,
1834  bool writer = false)
1835  {
1836  acquire(base, h, writer);
1837  }
1838 
1839  /*
1840  * Find a bucket by masked hashcode, optionally rehash
1841  */
1842  inline void
1843  acquire(concurrent_hash_map *base, const hashcode_type h,
1844  bool writer = false)
1845  {
1846  my_b = base->get_bucket(h);
1847 
1848  if (my_b->is_rehashed(std::memory_order_relaxed) ==
1849  false) {
1850  /* recursive rehashing */
1851  base->rehash_bucket<true>(my_b, h);
1852  }
1853 
1854  assert(my_b->is_rehashed(std::memory_order_relaxed));
1855  }
1856 
1863  bool
1864  is_writer() const
1865  {
1866  return true;
1867  }
1868 
1873  bucket *
1874  get() const
1875  {
1876  return my_b;
1877  }
1878 
1883  bucket *operator->() const
1884  {
1885  return this->get();
1886  }
1887  };
1888 
1889  hashcode_type
1890  get_hash_code(node_ptr_t &n)
1891  {
1892  return hasher{}(
1893  detail::static_persistent_pool_pointer_cast<node>(n)(
1894  this->my_pool_uuid)
1895  ->item.first);
1896  }
1897 
1898  template <bool serial>
1899  void
1900  rehash_bucket(bucket *b_new, const hashcode_type h)
1901  {
1902  using accessor_type = typename std::conditional<
1903  serial, serial_bucket_accessor, bucket_accessor>::type;
1904 
1905  using scoped_lock_traits_type =
1906  concurrent_hash_map_internal::scoped_lock_traits<
1907  accessor_type>;
1908 
1909  /* The first two buckets should always be rehashed */
1910  assert(h > 1);
1911 
1912  pool_base pop = get_pool_base();
1913  node_ptr_t *p_new = &(b_new->node_list);
1914 
1915  /* This condition is only true when there was a failure just
1916  * before setting the rehashed flag */
1917  if (*p_new != nullptr) {
1918  assert(!b_new->is_rehashed(std::memory_order_relaxed));
1919 
1920  b_new->set_rehashed(std::memory_order_relaxed);
1921  pop.persist(b_new->rehashed);
1922 
1923  return;
1924  }
1925 
1926  /* get parent mask from the topmost bit */
1927  hashcode_type mask = (1u << detail::Log2(h)) - 1;
1928  assert((h & mask) < h);
1929  accessor_type b_old(
1930  this, h & mask,
1931  scoped_lock_traits_type::initial_rw_state(true));
1932 
1933  pmem::obj::transaction::run(pop, [&] {
1934  /* get full mask for new bucket */
1935  mask = (mask << 1) | 1;
1936  assert((mask & (mask + 1)) == 0 && (h & mask) == h);
1937 
1938  restart:
1939  for (node_ptr_t *p_old = &(b_old->node_list),
1940  n = *p_old;
1941  n; n = *p_old) {
1942  hashcode_type c = get_hash_code(n);
1943 #ifndef NDEBUG
1944  hashcode_type bmask = h & (mask >> 1);
1945 
1946  bmask = bmask == 0
1947  ? 1 /* minimal mask of parent bucket */
1948  : (1u << (detail::Log2(bmask) + 1)) - 1;
1949 
1950  assert((c & bmask) == (h & bmask));
1951 #endif
1952 
1953  if ((c & mask) == h) {
1954  if (!b_old.is_writer() &&
1955  !scoped_lock_traits_type::
1956  upgrade_to_writer(b_old)) {
1957  goto restart;
1958  /* node ptr can be invalid due
1959  * to concurrent erase */
1960  }
1961 
1962  /* Add to new b_new */
1963  *p_new = n;
1964 
1965  /* exclude from b_old */
1966  *p_old = n(this->my_pool_uuid)->next;
1967 
1968  p_new = &(n(this->my_pool_uuid)->next);
1969  } else {
1970  /* iterate to next item */
1971  p_old = &(n(this->my_pool_uuid)->next);
1972  }
1973  }
1974 
1975  *p_new = nullptr;
1976  });
1977 
1978  /* mark rehashed */
1979  b_new->set_rehashed(std::memory_order_release);
1980  pop.persist(b_new->rehashed);
1981  }
1982 
1983  void
1984  check_incompat_features()
1985  {
1986  if (layout_features.incompat != header_features().incompat)
1987  throw pmem::layout_error(
1988  "Incompat flags mismatch, for more details go to: https://pmem.io/pmdk/cpp_obj/ \n");
1989 
1990  if ((layout_features.compat & FEATURE_CONSISTENT_SIZE) &&
1991  this->value_size != sizeof(value_type))
1992  throw pmem::layout_error(
1993  "Size of value_type is different than the one stored in the pool \n");
1994  }
1995 
1996 public:
1997  class accessor;
2001  class const_accessor
2002  : protected node::scoped_t /*which derived from no_copy*/ {
2003  friend class concurrent_hash_map<Key, T, Hash, KeyEqual,
2004  mutex_t, scoped_t>;
2005  friend class accessor;
2007  using node::scoped_t::try_acquire;
2008 
2009  public:
2013  using value_type =
2014  const typename concurrent_hash_map::value_type;
2015 
2020  bool
2021  empty() const
2022  {
2023  return !my_node;
2024  }
2025 
2032  void
2033  release()
2034  {
2035  concurrent_hash_map_internal::check_outside_tx();
2036 
2037  if (my_node) {
2038  node::scoped_t::release();
2039  my_node = 0;
2040  }
2041  }
2042 
2046  const_reference operator*() const
2047  {
2048  assert(my_node);
2049 
2050  return my_node->item;
2051  }
2052 
2056  const_pointer operator->() const
2057  {
2058  return &operator*();
2059  }
2060 
2066  const_accessor() : my_node(OID_NULL), my_hash()
2067  {
2068  concurrent_hash_map_internal::check_outside_tx();
2069  }
2070 
2074  ~const_accessor()
2075  {
2076  my_node = OID_NULL; // scoped lock's release() is called
2077  // in its destructor
2078  }
2079 
2080  protected:
2081  node_ptr_t my_node;
2082 
2083  hashcode_type my_hash;
2084  };
2085 
2090  class accessor : public const_accessor {
2091  public:
2093  using value_type = typename concurrent_hash_map::value_type;
2094 
2096  reference operator*() const
2097  {
2098  assert(this->my_node);
2099 
2100  return this->my_node->item;
2101  }
2102 
2104  pointer operator->() const
2105  {
2106  return &operator*();
2107  }
2108  };
2109 
2113  concurrent_hash_map() : hash_map_base()
2114  {
2115  runtime_initialize();
2116  }
2117 
2122  concurrent_hash_map(size_type n) : hash_map_base()
2123  {
2124  runtime_initialize();
2125 
2126  reserve(n);
2127  }
2128 
2132  concurrent_hash_map(const concurrent_hash_map &table) : hash_map_base()
2133  {
2134  runtime_initialize();
2135 
2136  reserve(table.size());
2137 
2138  internal_copy(table);
2139  }
2140 
2144  concurrent_hash_map(concurrent_hash_map &&table) : hash_map_base()
2145  {
2146  runtime_initialize();
2147 
2148  swap(table);
2149  }
2150 
2154  template <typename I>
2155  concurrent_hash_map(I first, I last)
2156  {
2157  runtime_initialize();
2158 
2159  reserve(static_cast<size_type>(std::distance(first, last)));
2160 
2161  internal_copy(first, last);
2162  }
2163 
2167  concurrent_hash_map(std::initializer_list<value_type> il)
2168  {
2169  runtime_initialize();
2170 
2171  reserve(il.size());
2172 
2173  internal_copy(il.begin(), il.end());
2174  }
2175 
2184  void
2185  runtime_initialize()
2186  {
2187  check_incompat_features();
2188 
2189  calculate_mask();
2190 
2191  /*
2192  * Handle case where hash_map was created without
2193  * FEATURE_CONSISTENT_SIZE.
2194  */
2195  if (!(layout_features.compat & FEATURE_CONSISTENT_SIZE)) {
2196  auto actual_size =
2197  std::distance(this->begin(), this->end());
2198  assert(actual_size >= 0);
2199 
2200  this->my_size = static_cast<size_t>(actual_size);
2201 
2202  auto pop = get_pool_base();
2203  transaction::run(pop, [&] {
2204  this->tls_ptr = make_persistent<tls_t>();
2205  this->on_init_size =
2206  static_cast<size_t>(actual_size);
2207  this->value_size = sizeof(value_type);
2208 
2209  layout_features.compat |=
2210  FEATURE_CONSISTENT_SIZE;
2211  });
2212  } else {
2213  assert(this->tls_ptr != nullptr);
2214  this->tls_restore();
2215  }
2216 
2217  assert(this->size() ==
2218  size_type(std::distance(this->begin(), this->end())));
2219  }
2220 
2221  [[deprecated(
2222  "runtime_initialize(bool) is now deprecated, use runtime_initialize(void)")]] void
2223  runtime_initialize(bool graceful_shutdown)
2224  {
2225  check_incompat_features();
2226 
2227  calculate_mask();
2228 
2229  if (!graceful_shutdown) {
2230  auto actual_size =
2231  std::distance(this->begin(), this->end());
2232  assert(actual_size >= 0);
2233  this->my_size = static_cast<size_type>(actual_size);
2234  } else {
2235  assert(this->size() ==
2236  size_type(std::distance(this->begin(),
2237  this->end())));
2238  }
2239  }
2240 
2252  concurrent_hash_map &
2253  operator=(const concurrent_hash_map &table)
2254  {
2255  if (this != &table) {
2256  clear();
2257  internal_copy(table);
2258  }
2259 
2260  return *this;
2261  }
2262 
2274  concurrent_hash_map &
2275  operator=(std::initializer_list<value_type> il)
2276  {
2277  clear();
2278 
2279  reserve(il.size());
2280 
2281  internal_copy(il.begin(), il.end());
2282 
2283  return *this;
2284  }
2285 
2294  void rehash(size_type n = 0);
2295 
2302  void clear();
2303 
2304  /*
2305  * Should be called before the concurrent_hash_map destructor is called.
2306  * Otherwise, the program can terminate if an exception occurs while freeing
2307  * memory inside the dtor.
2308  *
2309  * The hash map can NOT be used after free_data() was called (unless this
2310  * was done in a transaction and that transaction aborted).
2311  *
2312  * @throw std::transaction_error in case of PMDK transaction failure
2313  * @throw pmem::transaction_free_error when freeing underlying memory
2314  * failed.
2315  */
2316  void
2317  free_data()
2318  {
2319  if (!this->tls_ptr)
2320  return;
2321 
2322  auto pop = get_pool_base();
2323 
2324  transaction::run(pop, [&] {
2325  clear();
2326  this->free_tls();
2327  });
2328  }
2329 
2333  ~concurrent_hash_map()
2334  {
2335  free_data();
2336  }
2337 
2338  //------------------------------------------------------------------------
2339  // STL support - not thread-safe methods
2340  //------------------------------------------------------------------------
2341 
2348  iterator
2349  begin()
2350  {
2351  return iterator(this, 0);
2352  }
2353 
2358  iterator
2359  end()
2360  {
2361  return iterator(this, mask() + 1);
2362  }
2363 
2368  const_iterator
2369  begin() const
2370  {
2371  return const_iterator(this, 0);
2372  }
2373 
2378  const_iterator
2379  end() const
2380  {
2381  return const_iterator(this, mask() + 1);
2382  }
2383 
2387  size_type
2388  size() const
2389  {
2390  return hash_map_base::size();
2391  }
2392 
2396  bool
2397  empty() const
2398  {
2399  return this->size() == 0;
2400  }
2401 
2405  size_type
2406  max_size() const
2407  {
2408  return (~size_type(0)) / sizeof(node);
2409  }
2410 
2414  size_type
2415  bucket_count() const
2416  {
2417  return mask() + 1;
2418  }
2419 
2423  void swap(concurrent_hash_map &table);
2424 
2425  //------------------------------------------------------------------------
2426  // concurrent map operations
2427  //------------------------------------------------------------------------
2428 
2434  size_type
2435  count(const Key &key) const
2436  {
2437  concurrent_hash_map_internal::check_outside_tx();
2438 
2439  return const_cast<concurrent_hash_map *>(this)->internal_find(
2440  key, nullptr, false);
2441  }
2442 
2454  template <typename K,
2455  typename = typename std::enable_if<
2456  concurrent_hash_map_internal::
2457  has_transparent_key_equal<hasher>::value,
2458  K>::type>
2459  size_type
2460  count(const K &key) const
2461  {
2462  concurrent_hash_map_internal::check_outside_tx();
2463 
2464  return const_cast<concurrent_hash_map *>(this)->internal_find(
2465  key, nullptr, false);
2466  }
2467 
2474  bool
2475  find(const_accessor &result, const Key &key) const
2476  {
2477  concurrent_hash_map_internal::check_outside_tx();
2478 
2479  result.release();
2480 
2481  return const_cast<concurrent_hash_map *>(this)->internal_find(
2482  key, &result, false);
2483  }
2484 
2498  template <typename K,
2499  typename = typename std::enable_if<
2500  concurrent_hash_map_internal::
2501  has_transparent_key_equal<hasher>::value,
2502  K>::type>
2503  bool
2504  find(const_accessor &result, const K &key) const
2505  {
2506  concurrent_hash_map_internal::check_outside_tx();
2507 
2508  result.release();
2509 
2510  return const_cast<concurrent_hash_map *>(this)->internal_find(
2511  key, &result, false);
2512  }
2513 
2520  bool
2521  find(accessor &result, const Key &key)
2522  {
2523  concurrent_hash_map_internal::check_outside_tx();
2524 
2525  result.release();
2526 
2527  return internal_find(key, &result, true);
2528  }
2529 
2543  template <typename K,
2544  typename = typename std::enable_if<
2545  concurrent_hash_map_internal::
2546  has_transparent_key_equal<hasher>::value,
2547  K>::type>
2548  bool
2549  find(accessor &result, const K &key)
2550  {
2551  concurrent_hash_map_internal::check_outside_tx();
2552 
2553  result.release();
2554 
2555  return internal_find(key, &result, true);
2556  }
2564  bool
2565  insert(const_accessor &result, const Key &key)
2566  {
2567  concurrent_hash_map_internal::check_outside_tx();
2568 
2569  result.release();
2570 
2571  return internal_insert(key, &result, false, key);
2572  }
2573 
2581  bool
2582  insert(accessor &result, const Key &key)
2583  {
2584  concurrent_hash_map_internal::check_outside_tx();
2585 
2586  result.release();
2587 
2588  return internal_insert(key, &result, true, key);
2589  }
2590 
2598  bool
2599  insert(const_accessor &result, const value_type &value)
2600  {
2601  concurrent_hash_map_internal::check_outside_tx();
2602 
2603  result.release();
2604 
2605  return internal_insert(value.first, &result, false, value);
2606  }
2607 
2615  bool
2616  insert(accessor &result, const value_type &value)
2617  {
2618  concurrent_hash_map_internal::check_outside_tx();
2619 
2620  result.release();
2621 
2622  return internal_insert(value.first, &result, true, value);
2623  }
2624 
2631  bool
2632  insert(const value_type &value)
2633  {
2634  concurrent_hash_map_internal::check_outside_tx();
2635 
2636  return internal_insert(value.first, nullptr, false, value);
2637  }
2638 
2646  bool
2647  insert(const_accessor &result, value_type &&value)
2648  {
2649  concurrent_hash_map_internal::check_outside_tx();
2650 
2651  result.release();
2652 
2653  return internal_insert(value.first, &result, false,
2654  std::move(value));
2655  }
2656 
2664  bool
2665  insert(accessor &result, value_type &&value)
2666  {
2667  concurrent_hash_map_internal::check_outside_tx();
2668 
2669  result.release();
2670 
2671  return internal_insert(value.first, &result, true,
2672  std::move(value));
2673  }
2674 
2681  bool
2682  insert(value_type &&value)
2683  {
2684  concurrent_hash_map_internal::check_outside_tx();
2685 
2686  return internal_insert(value.first, nullptr, false,
2687  std::move(value));
2688  }
2689 
2695  template <typename I>
2696  void
2697  insert(I first, I last)
2698  {
2699  concurrent_hash_map_internal::check_outside_tx();
2700 
2701  for (; first != last; ++first)
2702  insert(*first);
2703  }
2704 
2710  void
2711  insert(std::initializer_list<value_type> il)
2712  {
2713  concurrent_hash_map_internal::check_outside_tx();
2714 
2715  insert(il.begin(), il.end());
2716  }
2717 
2726  template <typename M>
2727  bool
2728  insert_or_assign(const key_type &key, M &&obj)
2729  {
2730  concurrent_hash_map_internal::check_outside_tx();
2731 
2732  accessor acc;
2733  auto result = internal_insert(key, &acc, true, key,
2734  std::forward<M>(obj));
2735 
2736  if (!result) {
2737  pool_base pop = get_pool_base();
2738  pmem::obj::transaction::manual tx(pop);
2739  acc->second = std::forward<M>(obj);
2740  pmem::obj::transaction::commit();
2741  }
2742 
2743  return result;
2744  }
2745 
2754  template <typename M>
2755  bool
2756  insert_or_assign(key_type &&key, M &&obj)
2757  {
2758  concurrent_hash_map_internal::check_outside_tx();
2759 
2760  accessor acc;
2761  auto result = internal_insert(key, &acc, true, std::move(key),
2762  std::forward<M>(obj));
2763 
2764  if (!result) {
2765  pool_base pop = get_pool_base();
2766  pmem::obj::transaction::manual tx(pop);
2767  acc->second = std::forward<M>(obj);
2768  pmem::obj::transaction::commit();
2769  }
2770 
2771  return result;
2772  }
2773 
2782  template <
2783  typename K, typename M,
2784  typename = typename std::enable_if<
2785  concurrent_hash_map_internal::has_transparent_key_equal<
2786  hasher>::value &&
2787  std::is_constructible<key_type, K>::value,
2788  K>::type>
2789  bool
2790  insert_or_assign(K &&key, M &&obj)
2791  {
2792  concurrent_hash_map_internal::check_outside_tx();
2793 
2794  accessor acc;
2795  auto result =
2796  internal_insert(key, &acc, true, std::forward<K>(key),
2797  std::forward<M>(obj));
2798 
2799  if (!result) {
2800  pool_base pop = get_pool_base();
2801  pmem::obj::transaction::manual tx(pop);
2802  acc->second = std::forward<M>(obj);
2803  pmem::obj::transaction::commit();
2804  }
2805 
2806  return result;
2807  }
2808 
2817  bool
2818  erase(const Key &key)
2819  {
2820  concurrent_hash_map_internal::check_outside_tx();
2821 
2822  return internal_erase(key);
2823  }
2824 
2843  pobj_defrag_result
2844  defragment(double start_percent = 0, double amount_percent = 100)
2845  {
2846  double end_percent = start_percent + amount_percent;
2847  if (start_percent < 0 || start_percent >= 100 ||
2848  end_percent < 0 || end_percent > 100 ||
2849  start_percent >= end_percent) {
2850  throw std::range_error("incorrect range");
2851  }
2852 
2853  size_t max_index = mask().load(std::memory_order_acquire);
2854  size_t start_index =
2855  static_cast<size_t>((start_percent * max_index) / 100);
2856  size_t end_index =
2857  static_cast<size_t>((end_percent * max_index) / 100);
2858 
2859  /* Make sure we do not use too big an index, even in case of
2860  * rounding errors. */
2861  end_index = (std::min)(end_index, max_index);
2862 
2863 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
2864  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
2865 #endif
2866 
2867  /* Create defrag object for elements in the current pool */
2868  pmem::obj::defrag my_defrag(this->get_pool_base());
2869  mutex_vector mv;
2870 
2871  /*
2872  * Locks are taken in the backward order to avoid deadlocks
2873  * with the rehashing of buckets.
2874  *
2875  * We do '+ 1' and '- 1' to handle the 'i == 0' case.
2876  */
2877  for (size_t i = end_index + 1; i >= start_index + 1; i--) {
2878  /*
2879  * All locks will be unlocked automatically
2880  * in the destructor of 'mv'.
2881  */
2882  bucket *b = mv.push_and_try_lock(this, i - 1);
2883  if (b == nullptr)
2884  continue;
2885 
2886  defrag_save_nodes(b, my_defrag);
2887  }
2888 
2889  return my_defrag.run();
2890  }
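/*
 * [Usage sketch, editorial addition; not part of this header.]
 * defragment() processes the slice of the bucket array selected by
 * start_percent/amount_percent, so the work can be split across several
 * calls; an invalid range throws std::range_error. 'map_type' is the
 * hypothetical alias from the earlier sketch.
 */
void
defragment_example(map_type &m)
{
	/* defragment the first half of the buckets, then the second half */
	auto res1 = m.defragment(0, 50);
	auto res2 = m.defragment(50, 50);
	(void)res1;
	(void)res2;
}
/* [end of usage sketch] */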
2891 
2906  template <typename K,
2907  typename = typename std::enable_if<
2908  concurrent_hash_map_internal::
2909  has_transparent_key_equal<hasher>::value,
2910  K>::type>
2911  bool
2912  erase(const K &key)
2913  {
2914  concurrent_hash_map_internal::check_outside_tx();
2915 
2916  return internal_erase(key);
2917  }
2918 
2919 protected:
2920  /*
2921  * Try to acquire the mutex for read or write.
2922  *
2923  * If acquiring succeeds, returns true; otherwise retries a few times.
2924  * If acquiring still fails after all attempts, returns false.
2925  */
2926  bool try_acquire_item(const_accessor *result, node_mutex_t &mutex,
2927  bool write);
2928 
2933  class mutex_vector {
2934  public:
2935  using mutex_t = MutexType;
2936 
2938  bucket *
2939  push_and_try_lock(concurrent_hash_map *base, hashcode_type h)
2940  {
2941  vec.emplace_back(base, h, true /*writer*/);
2942  bucket *b = vec.back().get();
2943 
2944  auto node_ptr = static_cast<node *>(
2945  b->node_list.get(base->my_pool_uuid));
2946 
2947  while (node_ptr) {
2948  const_accessor ca;
2949  if (!base->try_acquire_item(&ca,
2950  node_ptr->mutex,
2951  /*write=*/true)) {
2952  vec.pop_back();
2953  return nullptr;
2954  }
2955 
2956  node_ptr =
2957  static_cast<node *>(node_ptr->next.get(
2958  (base->my_pool_uuid)));
2959  }
2960 
2961  return b;
2962  }
2963 
2964  private:
2965  std::vector<bucket_accessor> vec;
2966  };
2967 
2968  template <typename K>
2969  bool internal_find(const K &key, const_accessor *result, bool write);
2970 
2971  template <typename K, typename... Args>
2972  bool internal_insert(const K &key, const_accessor *result, bool write,
2973  Args &&... args);
2974 
2975  /* Obtain pointer to node and lock bucket */
2976  template <bool Bucket_rw_lock, typename K>
2977  persistent_node_ptr_t
2978  get_node(const K &key, bucket_accessor &b)
2979  {
2980  /* find a node */
2981  auto n = search_bucket(key, b.get());
2982 
2983  if (!n) {
2984  if (Bucket_rw_lock && !b.is_writer() &&
2985  !scoped_lock_traits_type::upgrade_to_writer(b)) {
2986  /* Rerun search_bucket, in case another
2987  * thread inserted the item during the
2988  * upgrade. */
2989  n = search_bucket(key, b.get());
2990  if (n) {
2991  /* unfortunately, it did */
2992  scoped_lock_traits_type::
2993  downgrade_to_reader(b);
2994  return n;
2995  }
2996  }
2997  }
2998 
2999  return n;
3000  }
3001 
3002  template <typename K>
3003  bool internal_erase(const K &key);
3004 
3005  void clear_segment(segment_index_t s);
3006 
3010  void internal_copy(const concurrent_hash_map &source);
3011 
3012  template <typename I>
3013  void internal_copy(I first, I last);
3014 
3019  void
3020  defrag_save_nodes(bucket *b, pmem::obj::defrag &defrag)
3021  {
3022  auto node_ptr = static_cast<node *>(
3023  b->node_list.get(this->my_pool_uuid));
3024 
3025  while (node_ptr) {
3026  /*
3027  * We do not perform the defragmentation
3028  * on node pointers, because nodes
3029  * always have the same size.
3030  */
3031  defrag.add(node_ptr->item.first);
3032  defrag.add(node_ptr->item.second);
3033 
3034  node_ptr = static_cast<node *>(
3035  node_ptr->next.get((this->my_pool_uuid)));
3036  }
3037  }
3038 }; // class concurrent_hash_map
3039 
3040 template <typename Key, typename T, typename Hash, typename KeyEqual,
3041  typename MutexType, typename ScopedLockType>
3042 bool
3043 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3044  ScopedLockType>::try_acquire_item(const_accessor *result,
3045  node_mutex_t &mutex,
3046  bool write)
3047 {
3048  /* acquire the item */
3049  if (!result->try_acquire(mutex, write)) {
3050  for (detail::atomic_backoff backoff(true);;) {
3051  if (result->try_acquire(mutex, write))
3052  break;
3053 
3054  if (!backoff.bounded_pause())
3055  return false;
3056  }
3057  }
3058 
3059  return true;
3060 }
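/*
 * [Illustration, editorial addition; not part of this header.]
 * try_acquire_item() above spins on the per-node mutex with a bounded backoff
 * and gives up once bounded_pause() reports the wait is getting long; its
 * callers then release the bucket, yield and restart the whole lookup. The
 * same retry-then-give-up pattern with standard facilities only (the helper
 * name and attempt limit are made up for the illustration; needs <thread>):
 */
template <typename TryLock>
bool
try_lock_with_bounded_retries(TryLock &&try_lock, int max_attempts = 64)
{
	for (int i = 0; i < max_attempts; ++i) {
		if (try_lock())
			return true;
		std::this_thread::yield(); /* back off before retrying */
	}
	return false; /* caller is expected to restart its operation */
}
/* [end of illustration] */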
3061 
3062 template <typename Key, typename T, typename Hash, typename KeyEqual,
3063  typename MutexType, typename ScopedLockType>
3064 template <typename K>
3065 bool
3066 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3067  ScopedLockType>::internal_find(const K &key,
3068  const_accessor *result,
3069  bool write)
3070 {
3071  assert(!result || !result->my_node);
3072 
3073  hashcode_type m = mask().load(std::memory_order_acquire);
3074 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
3075  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
3076 #endif
3077 
3078  assert((m & (m + 1)) == 0);
3079 
3080  hashcode_type const h = hasher{}(key);
3081 
3082  persistent_node_ptr_t node;
3083 
3084  while (true) {
3085  /* get bucket and acquire the lock */
3086  bucket_accessor b(
3087  this, h & m,
3088  scoped_lock_traits_type::initial_rw_state(false));
3089  node = get_node<false>(key, b);
3090 
3091  if (!node) {
3092  /* Element was possibly relocated, try again */
3093  if (check_mask_race(h, m)) {
3094  b.release();
3095  continue;
3096  } else {
3097  return false;
3098  }
3099  }
3100 
3101  /* Either no item lock is needed, or the item was acquired */
3102  if (!result ||
3103  try_acquire_item(
3104  result, node.get(this->my_pool_uuid)->mutex, write))
3105  break;
3106 
3107  /* the wait takes really long, restart the
3108  * operation */
3109  b.release();
3110 
3111  std::this_thread::yield();
3112 
3113  m = mask().load(std::memory_order_acquire);
3114 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
3115  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
3116 #endif
3117  }
3118 
3119  if (result) {
3120  result->my_node = node.get_persistent_ptr(this->my_pool_uuid);
3121  result->my_hash = h;
3122  }
3123 
3124  return true;
3125 }
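/*
 * [Usage sketch, editorial addition; not part of this header.]
 * internal_find() backs the public find() overloads: a const_accessor takes a
 * read (shared) lock on the found item, an accessor takes a write lock.
 * 'map_type' and the transactional update pattern are the hypothetical ones
 * from the earlier sketches.
 */
void
find_example(pmem::obj::pool_base &pop, map_type &m)
{
	map_type::const_accessor racc;
	if (m.find(racc, 1)) {
		int v = racc->second.get_ro(); /* read under a shared lock */
		(void)v;
	}
	racc.release();

	map_type::accessor wacc;
	if (m.find(wacc, 1)) {
		/* modify under an exclusive lock, inside a transaction */
		pmem::obj::transaction::run(pop, [&] { wacc->second = 7; });
	}
}
/* [end of usage sketch] */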
3126 
3127 template <typename Key, typename T, typename Hash, typename KeyEqual,
3128  typename MutexType, typename ScopedLockType>
3129 template <typename K, typename... Args>
3130 bool
3131 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3132  ScopedLockType>::internal_insert(const K &key,
3133  const_accessor *result,
3134  bool write,
3135  Args &&... args)
3136 {
3137  assert(!result || !result->my_node);
3138 
3139  hashcode_type m = mask().load(std::memory_order_acquire);
3140 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
3141  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
3142 #endif
3143 
3144  assert((m & (m + 1)) == 0);
3145 
3146  hashcode_type const h = hasher{}(key);
3147 
3148  persistent_node_ptr_t node;
3149  size_t new_size = 0;
3150  bool inserted = false;
3151 
3152  while (true) {
3153  /* get bucket and acquire the lock */
3154  bucket_accessor b(
3155  this, h & m,
3156  scoped_lock_traits_type::initial_rw_state(true));
3157  node = get_node<true>(key, b);
3158 
3159  if (!node) {
3160  /* Element was possibly relocated, try again */
3161  if (check_mask_race(h, m)) {
3162  b.release();
3163  continue;
3164  }
3165 
3166  /* insert and set flag to grow the container */
3167  new_size = insert_new_node(b.get(), node,
3168  std::forward<Args>(args)...);
3169  inserted = true;
3170  }
3171 
3172  /* Either no item lock is needed, or the item was acquired */
3173  if (!result ||
3174  try_acquire_item(
3175  result, node.get(this->my_pool_uuid)->mutex, write))
3176  break;
3177 
3178  /* the wait takes really long, restart the
3179  * operation */
3180  b.release();
3181 
3182  std::this_thread::yield();
3183 
3184  m = mask().load(std::memory_order_acquire);
3185 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
3186  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
3187 #endif
3188  }
3189 
3190  if (result) {
3191  result->my_node = node.get_persistent_ptr(this->my_pool_uuid);
3192  result->my_hash = h;
3193  }
3194 
3195  check_growth(m, new_size);
3196 
3197  return inserted;
3198 }
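/*
 * [Usage sketch, editorial addition; not part of this header.]
 * internal_insert() is what makes the insert() overloads safe to call from
 * many threads at once: per-bucket locks plus the mask re-check handle
 * concurrent rehashing. A sketch of concurrent filling (thread count and key
 * ranges are arbitrary; needs <thread> and <vector>); 'map_type' is the
 * hypothetical alias from the earlier sketch.
 */
void
parallel_fill_example(map_type &m)
{
	std::vector<std::thread> workers;
	for (int t = 0; t < 4; ++t) {
		workers.emplace_back([&m, t] {
			for (int i = t * 1000; i < (t + 1) * 1000; ++i)
				m.insert(map_type::value_type(i, i));
		});
	}
	for (auto &w : workers)
		w.join();
}
/* [end of usage sketch] */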
3199 
3200 template <typename Key, typename T, typename Hash, typename KeyEqual,
3201  typename MutexType, typename ScopedLockType>
3202 template <typename K>
3203 bool
3204 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3205  ScopedLockType>::internal_erase(const K &key)
3206 {
3207  node_ptr_t n;
3208  hashcode_type const h = hasher{}(key);
3209  hashcode_type m = mask().load(std::memory_order_acquire);
3210 #if LIBPMEMOBJ_CPP_VG_HELGRIND_ENABLED
3211  ANNOTATE_HAPPENS_AFTER(&(this->my_mask));
3212 #endif
3213 
3214  pool_base pop = get_pool_base();
3215 
3216 restart : {
3217  /* lock scope */
3218  /* get bucket */
3219  bucket_accessor b(this, h & m,
3220  scoped_lock_traits_type::initial_rw_state(true));
3221 
3222 search:
3223  node_ptr_t *p = &b->node_list;
3224  n = *p;
3225 
3226  while (n &&
3227  !key_equal{}(key,
3228  detail::static_persistent_pool_pointer_cast<node>(
3229  n)(this->my_pool_uuid)
3230  ->item.first)) {
3231  p = &n(this->my_pool_uuid)->next;
3232  n = *p;
3233  }
3234 
3235  if (!n) {
3236  /* not found, but mask could be changed */
3237  if (check_mask_race(h, m))
3238  goto restart;
3239 
3240  return false;
3241  } else if (!b.is_writer() &&
3242  !scoped_lock_traits_type::upgrade_to_writer(b)) {
3243  if (check_mask_race(h, m)) /* contended upgrade, check mask */
3244  goto restart;
3245 
3246  goto search;
3247  }
3248 
3249  persistent_ptr<node> del = n(this->my_pool_uuid);
3250 
3251  {
3252  /* We cannot remove this element immediately because
3253  * other threads might work with this element via
3254  * accessors. The item_locker is required to wait while
3255  * other threads use the node. */
3256  const_accessor acc;
3257  if (!try_acquire_item(&acc, del->mutex, true)) {
3258  /* the wait takes really long, restart the operation */
3259  b.release();
3260 
3261  std::this_thread::yield();
3262 
3263  m = mask().load(std::memory_order_acquire);
3264 
3265  goto restart;
3266  }
3267  }
3268 
3269  assert(pmemobj_tx_stage() == TX_STAGE_NONE);
3270 
3271  auto &size_diff = this->thread_size_diff();
3272 
3273  /* Only one thread can delete it due to write lock on the bucket
3274  */
3275  transaction::run(pop, [&] {
3276  *p = del->next;
3277  delete_node(del);
3278 
3279  --size_diff;
3280  });
3281 
3282  --(this->my_size);
3283 }
3284 
3285  return true;
3286 }
3287 
3288 template <typename Key, typename T, typename Hash, typename KeyEqual,
3289  typename MutexType, typename ScopedLockType>
3290 void
3291 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3292  ScopedLockType>::swap(concurrent_hash_map &table)
3293 {
3294  internal_swap(table);
3295 }
3296 
3297 template <typename Key, typename T, typename Hash, typename KeyEqual,
3298  typename MutexType, typename ScopedLockType>
3299 void
3300 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::rehash(
3301  size_type sz)
3302 {
3303  concurrent_hash_map_internal::check_outside_tx();
3304 
3305  reserve(sz);
3306  hashcode_type m = mask();
3307 
3308  /* Only the last segment needs to be scanned for rehashing; 'b' is the
3309  * size (i.e. the first index) of that last segment. */
3310  hashcode_type b = (m + 1) >> 1;
3311 
3312  /* zero or power of 2 */
3313  assert((b & (b - 1)) == 0);
3314 
3315  for (; b <= m; ++b) {
3316  bucket *bp = get_bucket(b);
3317 
3318  concurrent_hash_map_internal::assert_not_locked<mutex_t,
3319  scoped_t>(
3320  bp->mutex);
3321  /* XXX Need to investigate if this statement is needed */
3322  if (bp->is_rehashed(std::memory_order_relaxed) == false)
3323  rehash_bucket<true>(bp, b);
3324  }
3325 }
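/*
 * [Usage sketch, editorial addition; not part of this header.]
 * rehash() walks every bucket and is meant to run while no other thread
 * operates on the map (it asserts that no bucket is locked). Sizing the table
 * up front avoids rehashing while worker threads are running. 'map_type' is
 * the hypothetical alias from the earlier sketch.
 */
void
rehash_example(map_type &m)
{
	m.rehash(1 << 20); /* room for ~1M elements before the workers start */
}
/* [end of usage sketch] */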
3326 
3327 template <typename Key, typename T, typename Hash, typename KeyEqual,
3328  typename MutexType, typename ScopedLockType>
3329 void
3330 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::clear()
3331 {
3332  hashcode_type m = mask();
3333 
3334  assert((m & (m + 1)) == 0);
3335 
3336 #ifndef NDEBUG
3337  /* check consistency */
3338  for (segment_index_t b = 0; b <= m; ++b) {
3339  bucket *bp = get_bucket(b);
3340  concurrent_hash_map_internal::assert_not_locked<mutex_t,
3341  scoped_t>(
3342  bp->mutex);
3343  }
3344 #endif
3345 
3346  pool_base pop = get_pool_base();
3347  { /* transaction scope */
3348 
3349  transaction::manual tx(pop);
3350 
3351  assert(this->tls_ptr != nullptr);
3352  this->tls_ptr->clear();
3353 
3354  this->on_init_size = 0;
3355 
3356  segment_index_t s = segment_traits_t::segment_index_of(m);
3357 
3358  assert(s + 1 == this->block_table_size ||
3359  !segment_facade_t(this->my_table, s + 1).is_valid());
3360 
3361  do {
3362  clear_segment(s);
3363  } while (s-- > 0);
3364 
3365  /*
3366  * As clear() can only be called from one thread,
3367  * and there can be an outer transaction, we must
3368  * make sure that mask and size changes are
3369  * transactional.
3370  */
3371  transaction::snapshot((size_t *)&this->my_mask);
3372  transaction::snapshot((size_t *)&this->my_size);
3373 
3374  mask().store(embedded_buckets - 1, std::memory_order_relaxed);
3375  this->my_size = 0;
3376 
3377  transaction::commit();
3378  }
3379 }
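/*
 * [Usage sketch, editorial addition; not part of this header.]
 * clear() is not thread safe and is typically called when no other thread
 * touches the map. runtime_initialize() is the documented step after every
 * application restart. The 'root' struct, pool handle and 'map_type' alias
 * are the hypothetical ones from the first sketch.
 */
void
reset_after_restart(pmem::obj::pool<root> &pop)
{
	auto &m = *pop.root()->map;
	m.runtime_initialize(); /* required once after the pool is reopened */
	m.clear();		/* single-threaded: drop all elements */
}
/* [end of usage sketch] */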
3380 
3381 template <typename Key, typename T, typename Hash, typename KeyEqual,
3382  typename MutexType, typename ScopedLockType>
3383 void
3384 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3385  ScopedLockType>::clear_segment(segment_index_t s)
3386 {
3387  segment_facade_t segment(this->my_table, s);
3388 
3389  assert(segment.is_valid());
3390 
3391  size_type sz = segment.size();
3392  for (segment_index_t i = 0; i < sz; ++i) {
3393  for (node_ptr_t n = segment[i].node_list; n;
3394  n = segment[i].node_list) {
3395  segment[i].node_list = n(this->my_pool_uuid)->next;
3396  delete_node(n);
3397  }
3398  }
3399 
3400  if (s >= segment_traits_t::embedded_segments)
3401  segment.disable();
3402 }
3403 
3404 template <typename Key, typename T, typename Hash, typename KeyEqual,
3405  typename MutexType, typename ScopedLockType>
3406 void
3407 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType>::
3408  internal_copy(const concurrent_hash_map &source)
3409 {
3410  auto pop = get_pool_base();
3411 
3412  reserve(source.size());
3413  internal_copy(source.begin(), source.end());
3414 }
3415 
3416 template <typename Key, typename T, typename Hash, typename KeyEqual,
3417  typename MutexType, typename ScopedLockType>
3418 template <typename I>
3419 void
3420 concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3421  ScopedLockType>::internal_copy(I first, I last)
3422 {
3423  hashcode_type m = mask();
3424 
3425  for (; first != last; ++first) {
3426  hashcode_type h = hasher{}(first->first);
3427  bucket *b = get_bucket(h & m);
3428 
3429  assert(b->is_rehashed(std::memory_order_relaxed));
3430 
3431  detail::persistent_pool_ptr<node> p;
3432  insert_new_node(b, p, *first);
3433  }
3434 }
3435 
3436 template <typename Key, typename T, typename Hash, typename KeyEqual,
3437  typename MutexType, typename ScopedLockType>
3438 inline bool
3439 operator==(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3440  ScopedLockType> &a,
3441  const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3442  ScopedLockType> &b)
3443 {
3444  if (a.size() != b.size())
3445  return false;
3446 
3447  typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3448  ScopedLockType>::const_iterator
3449  i(a.begin()),
3450  i_end(a.end());
3451 
3452  typename concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3453  ScopedLockType>::const_iterator j,
3454  j_end(b.end());
3455 
3456  for (; i != i_end; ++i) {
3457  j = b.equal_range(i->first).first;
3458 
3459  if (j == j_end || !(i->second == j->second))
3460  return false;
3461  }
3462 
3463  return true;
3464 }
3465 
3466 template <typename Key, typename T, typename Hash, typename KeyEqual,
3467  typename MutexType, typename ScopedLockType>
3468 inline bool
3469 operator!=(const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3470  ScopedLockType> &a,
3471  const concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType,
3472  ScopedLockType> &b)
3473 {
3474  return !(a == b);
3475 }
3476 
3477 template <typename Key, typename T, typename Hash, typename KeyEqual,
3478  typename MutexType, typename ScopedLockType>
3479 inline void
3480 swap(concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType> &a,
3481  concurrent_hash_map<Key, T, Hash, KeyEqual, MutexType, ScopedLockType> &b)
3482 {
3483  a.swap(b);
3484 }
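/*
 * [Usage sketch, editorial addition; not part of this header.]
 * operator== above compares the maps element by element through
 * equal_range(), and the non-member swap() forwards to the member swap().
 * 'map_type' is the hypothetical alias from the earlier sketch.
 */
void
compare_and_swap_example(map_type &a, map_type &b)
{
	if (a == b)
		return; /* equal size and equal mapped value for every key */

	swap(a, b); /* exchange the contents of the two maps */
}
/* [end of usage sketch] */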
3485 
3486 } /* namespace obj */
3487 } /* namespace pmem */
3488 
3489 #endif /* PMEMOBJ_CONCURRENT_HASH_MAP_HPP */