AtomicHashMap-inl.h (17 KB)
  1. /*
  2. * Copyright 2012-present Facebook, Inc.
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef FOLLY_ATOMICHASHMAP_H_
  17. #error "This should only be included by AtomicHashMap.h"
  18. #endif
  19. #include <folly/detail/AtomicHashUtils.h>
  20. namespace folly {
  21. // AtomicHashMap constructor -- Atomic wrapper that allows growth
  22. // This class has a lot of overhead (184 Bytes) so only use for big maps
  23. template <
  24. typename KeyT,
  25. typename ValueT,
  26. typename HashFcn,
  27. typename EqualFcn,
  28. typename Allocator,
  29. typename ProbeFcn,
  30. typename KeyConvertFcn>
  31. AtomicHashMap<
  32. KeyT,
  33. ValueT,
  34. HashFcn,
  35. EqualFcn,
  36. Allocator,
  37. ProbeFcn,
  38. KeyConvertFcn>::AtomicHashMap(size_t finalSizeEst, const Config& config)
  39. : kGrowthFrac_(
  40. config.growthFactor < 0 ? 1.0f - config.maxLoadFactor
  41. : config.growthFactor) {
  42. CHECK(config.maxLoadFactor > 0.0f && config.maxLoadFactor < 1.0f);
  43. subMaps_[0].store(
  44. SubMap::create(finalSizeEst, config).release(),
  45. std::memory_order_relaxed);
  46. auto subMapCount = kNumSubMaps_;
  47. FOR_EACH_RANGE (i, 1, subMapCount) {
  48. subMaps_[i].store(nullptr, std::memory_order_relaxed);
  49. }
  50. numMapsAllocated_.store(1, std::memory_order_relaxed);
  51. }
  52. // emplace --
  53. template <
  54. typename KeyT,
  55. typename ValueT,
  56. typename HashFcn,
  57. typename EqualFcn,
  58. typename Allocator,
  59. typename ProbeFcn,
  60. typename KeyConvertFcn>
  61. template <
  62. typename LookupKeyT,
  63. typename LookupHashFcn,
  64. typename LookupEqualFcn,
  65. typename LookupKeyToKeyFcn,
  66. typename... ArgTs>
  67. std::pair<
  68. typename AtomicHashMap<
  69. KeyT,
  70. ValueT,
  71. HashFcn,
  72. EqualFcn,
  73. Allocator,
  74. ProbeFcn,
  75. KeyConvertFcn>::iterator,
  76. bool>
  77. AtomicHashMap<
  78. KeyT,
  79. ValueT,
  80. HashFcn,
  81. EqualFcn,
  82. Allocator,
  83. ProbeFcn,
  84. KeyConvertFcn>::emplace(LookupKeyT k, ArgTs&&... vCtorArgs) {
  85. SimpleRetT ret = insertInternal<
  86. LookupKeyT,
  87. LookupHashFcn,
  88. LookupEqualFcn,
  89. LookupKeyToKeyFcn>(k, std::forward<ArgTs>(vCtorArgs)...);
  90. SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
  91. return std::make_pair(
  92. iterator(this, ret.i, subMap->makeIter(ret.j)), ret.success);
  93. }
// insertInternal -- Allocates new sub maps as existing ones fill up.
//
// Tries every already-published submap in order; if all are full, races to
// allocate (or waits for) the next submap, then inserts there. Throws
// AtomicHashMapFullError once no further growth is possible.
template <
    typename KeyT,
    typename ValueT,
    typename HashFcn,
    typename EqualFcn,
    typename Allocator,
    typename ProbeFcn,
    typename KeyConvertFcn>
template <
    typename LookupKeyT,
    typename LookupHashFcn,
    typename LookupEqualFcn,
    typename LookupKeyToKeyFcn,
    typename... ArgTs>
typename AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::SimpleRetT
AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs) {
 beginInsertInternal:
  // Acquire pairs with the release fetch_add below: every submap published
  // before this load is guaranteed to be fully constructed and visible.
  auto nextMapIdx = // this maintains our state
      numMapsAllocated_.load(std::memory_order_acquire);
  typename SubMap::SimpleRetT ret;
  FOR_EACH_RANGE (i, 0, nextMapIdx) {
    // insert in each map successively. If one succeeds, we're done!
    SubMap* subMap = subMaps_[i].load(std::memory_order_relaxed);
    ret = subMap->template insertInternal<
        LookupKeyT,
        LookupHashFcn,
        LookupEqualFcn,
        LookupKeyToKeyFcn>(key, std::forward<ArgTs>(vCtorArgs)...);
    if (ret.idx == subMap->capacity_) {
      continue; // map is full, so try the next one
    }
    // Either collision or success - insert in either case
    return SimpleRetT(i, ret.idx, ret.success);
  }
  // If we made it this far, all maps are full and we need to try to allocate
  // the next one.
  SubMap* primarySubMap = subMaps_[0].load(std::memory_order_relaxed);
  if (nextMapIdx >= kNumSubMaps_ ||
      primarySubMap->capacity_ * kGrowthFrac_ < 1.0) {
    // Can't allocate any more sub maps.
    throw AtomicHashMapFullError();
  }
  if (tryLockMap(nextMapIdx)) {
    // Alloc a new map and shove it in. We can change whatever
    // we want because other threads are waiting on us...
    size_t numCellsAllocated = (size_t)(
        primarySubMap->capacity_ *
        std::pow(1.0 + kGrowthFrac_, nextMapIdx - 1));
    size_t newSize = size_t(numCellsAllocated * kGrowthFrac_);
    // tryLockMap left the sentinel in the slot; verify nobody clobbered it.
    DCHECK(
        subMaps_[nextMapIdx].load(std::memory_order_relaxed) ==
        (SubMap*)kLockedPtr_);
    // create a new map using the settings stored in the first map
    Config config;
    config.emptyKey = primarySubMap->kEmptyKey_;
    config.lockedKey = primarySubMap->kLockedKey_;
    config.erasedKey = primarySubMap->kErasedKey_;
    config.maxLoadFactor = primarySubMap->maxLoadFactor();
    config.entryCountThreadCacheSize =
        primarySubMap->getEntryCountThreadCacheSize();
    subMaps_[nextMapIdx].store(
        SubMap::create(newSize, config).release(), std::memory_order_relaxed);
    // Publish the map to other threads.
    numMapsAllocated_.fetch_add(1, std::memory_order_release);
    DCHECK_EQ(
        nextMapIdx + 1, numMapsAllocated_.load(std::memory_order_relaxed));
  } else {
    // If we lost the race, we'll have to wait for the next map to get
    // allocated before doing any insertion here.
    detail::atomic_hash_spin_wait([&] {
      return nextMapIdx >= numMapsAllocated_.load(std::memory_order_acquire);
    });
  }
  // Relaxed is ok here because either we just created this map, or we
  // just did a spin wait with an acquire load on numMapsAllocated_.
  SubMap* loadedMap = subMaps_[nextMapIdx].load(std::memory_order_relaxed);
  DCHECK(loadedMap && loadedMap != (SubMap*)kLockedPtr_);
  ret = loadedMap->insertInternal(key, std::forward<ArgTs>(vCtorArgs)...);
  if (ret.idx != loadedMap->capacity_) {
    return SimpleRetT(nextMapIdx, ret.idx, ret.success);
  }
  // We took way too long and the new map is already full...try again from
  // the top (this should pretty much never happen).
  goto beginInsertInternal;
}
  194. // find --
  195. template <
  196. typename KeyT,
  197. typename ValueT,
  198. typename HashFcn,
  199. typename EqualFcn,
  200. typename Allocator,
  201. typename ProbeFcn,
  202. typename KeyConvertFcn>
  203. template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
  204. typename AtomicHashMap<
  205. KeyT,
  206. ValueT,
  207. HashFcn,
  208. EqualFcn,
  209. Allocator,
  210. ProbeFcn,
  211. KeyConvertFcn>::iterator
  212. AtomicHashMap<
  213. KeyT,
  214. ValueT,
  215. HashFcn,
  216. EqualFcn,
  217. Allocator,
  218. ProbeFcn,
  219. KeyConvertFcn>::find(LookupKeyT k) {
  220. SimpleRetT ret = findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
  221. if (!ret.success) {
  222. return end();
  223. }
  224. SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
  225. return iterator(this, ret.i, subMap->makeIter(ret.j));
  226. }
  227. template <
  228. typename KeyT,
  229. typename ValueT,
  230. typename HashFcn,
  231. typename EqualFcn,
  232. typename Allocator,
  233. typename ProbeFcn,
  234. typename KeyConvertFcn>
  235. template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
  236. typename AtomicHashMap<
  237. KeyT,
  238. ValueT,
  239. HashFcn,
  240. EqualFcn,
  241. Allocator,
  242. ProbeFcn,
  243. KeyConvertFcn>::const_iterator
  244. AtomicHashMap<
  245. KeyT,
  246. ValueT,
  247. HashFcn,
  248. EqualFcn,
  249. Allocator,
  250. ProbeFcn,
  251. KeyConvertFcn>::find(LookupKeyT k) const {
  252. return const_cast<AtomicHashMap*>(this)
  253. ->find<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
  254. }
  255. // findInternal --
  256. template <
  257. typename KeyT,
  258. typename ValueT,
  259. typename HashFcn,
  260. typename EqualFcn,
  261. typename Allocator,
  262. typename ProbeFcn,
  263. typename KeyConvertFcn>
  264. template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
  265. typename AtomicHashMap<
  266. KeyT,
  267. ValueT,
  268. HashFcn,
  269. EqualFcn,
  270. Allocator,
  271. ProbeFcn,
  272. KeyConvertFcn>::SimpleRetT
  273. AtomicHashMap<
  274. KeyT,
  275. ValueT,
  276. HashFcn,
  277. EqualFcn,
  278. Allocator,
  279. ProbeFcn,
  280. KeyConvertFcn>::findInternal(const LookupKeyT k) const {
  281. SubMap* const primaryMap = subMaps_[0].load(std::memory_order_relaxed);
  282. typename SubMap::SimpleRetT ret =
  283. primaryMap
  284. ->template findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
  285. if (LIKELY(ret.idx != primaryMap->capacity_)) {
  286. return SimpleRetT(0, ret.idx, ret.success);
  287. }
  288. const unsigned int numMaps =
  289. numMapsAllocated_.load(std::memory_order_acquire);
  290. FOR_EACH_RANGE (i, 1, numMaps) {
  291. // Check each map successively. If one succeeds, we're done!
  292. SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
  293. ret =
  294. thisMap
  295. ->template findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(
  296. k);
  297. if (LIKELY(ret.idx != thisMap->capacity_)) {
  298. return SimpleRetT(i, ret.idx, ret.success);
  299. }
  300. }
  301. // Didn't find our key...
  302. return SimpleRetT(numMaps, 0, false);
  303. }
  304. // findAtInternal -- see encodeIndex() for details.
  305. template <
  306. typename KeyT,
  307. typename ValueT,
  308. typename HashFcn,
  309. typename EqualFcn,
  310. typename Allocator,
  311. typename ProbeFcn,
  312. typename KeyConvertFcn>
  313. typename AtomicHashMap<
  314. KeyT,
  315. ValueT,
  316. HashFcn,
  317. EqualFcn,
  318. Allocator,
  319. ProbeFcn,
  320. KeyConvertFcn>::SimpleRetT
  321. AtomicHashMap<
  322. KeyT,
  323. ValueT,
  324. HashFcn,
  325. EqualFcn,
  326. Allocator,
  327. ProbeFcn,
  328. KeyConvertFcn>::findAtInternal(uint32_t idx) const {
  329. uint32_t subMapIdx, subMapOffset;
  330. if (idx & kSecondaryMapBit_) {
  331. // idx falls in a secondary map
  332. idx &= ~kSecondaryMapBit_; // unset secondary bit
  333. subMapIdx = idx >> kSubMapIndexShift_;
  334. DCHECK_LT(subMapIdx, numMapsAllocated_.load(std::memory_order_relaxed));
  335. subMapOffset = idx & kSubMapIndexMask_;
  336. } else {
  337. // idx falls in primary map
  338. subMapIdx = 0;
  339. subMapOffset = idx;
  340. }
  341. return SimpleRetT(subMapIdx, subMapOffset, true);
  342. }
  343. // erase --
  344. template <
  345. typename KeyT,
  346. typename ValueT,
  347. typename HashFcn,
  348. typename EqualFcn,
  349. typename Allocator,
  350. typename ProbeFcn,
  351. typename KeyConvertFcn>
  352. typename AtomicHashMap<
  353. KeyT,
  354. ValueT,
  355. HashFcn,
  356. EqualFcn,
  357. Allocator,
  358. ProbeFcn,
  359. KeyConvertFcn>::size_type
  360. AtomicHashMap<
  361. KeyT,
  362. ValueT,
  363. HashFcn,
  364. EqualFcn,
  365. Allocator,
  366. ProbeFcn,
  367. KeyConvertFcn>::erase(const KeyT k) {
  368. int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  369. FOR_EACH_RANGE (i, 0, numMaps) {
  370. // Check each map successively. If one succeeds, we're done!
  371. if (subMaps_[i].load(std::memory_order_relaxed)->erase(k)) {
  372. return 1;
  373. }
  374. }
  375. // Didn't find our key...
  376. return 0;
  377. }
  378. // capacity -- summation of capacities of all submaps
  379. template <
  380. typename KeyT,
  381. typename ValueT,
  382. typename HashFcn,
  383. typename EqualFcn,
  384. typename Allocator,
  385. typename ProbeFcn,
  386. typename KeyConvertFcn>
  387. size_t AtomicHashMap<
  388. KeyT,
  389. ValueT,
  390. HashFcn,
  391. EqualFcn,
  392. Allocator,
  393. ProbeFcn,
  394. KeyConvertFcn>::capacity() const {
  395. size_t totalCap(0);
  396. int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  397. FOR_EACH_RANGE (i, 0, numMaps) {
  398. totalCap += subMaps_[i].load(std::memory_order_relaxed)->capacity_;
  399. }
  400. return totalCap;
  401. }
  402. // spaceRemaining --
  403. // number of new insertions until current submaps are all at max load
  404. template <
  405. typename KeyT,
  406. typename ValueT,
  407. typename HashFcn,
  408. typename EqualFcn,
  409. typename Allocator,
  410. typename ProbeFcn,
  411. typename KeyConvertFcn>
  412. size_t AtomicHashMap<
  413. KeyT,
  414. ValueT,
  415. HashFcn,
  416. EqualFcn,
  417. Allocator,
  418. ProbeFcn,
  419. KeyConvertFcn>::spaceRemaining() const {
  420. size_t spaceRem(0);
  421. int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  422. FOR_EACH_RANGE (i, 0, numMaps) {
  423. SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
  424. spaceRem +=
  425. std::max(0, thisMap->maxEntries_ - &thisMap->numEntries_.readFull());
  426. }
  427. return spaceRem;
  428. }
  429. // clear -- Wipes all keys and values from primary map and destroys
  430. // all secondary maps. Not thread safe.
  431. template <
  432. typename KeyT,
  433. typename ValueT,
  434. typename HashFcn,
  435. typename EqualFcn,
  436. typename Allocator,
  437. typename ProbeFcn,
  438. typename KeyConvertFcn>
  439. void AtomicHashMap<
  440. KeyT,
  441. ValueT,
  442. HashFcn,
  443. EqualFcn,
  444. Allocator,
  445. ProbeFcn,
  446. KeyConvertFcn>::clear() {
  447. subMaps_[0].load(std::memory_order_relaxed)->clear();
  448. int const numMaps = numMapsAllocated_.load(std::memory_order_relaxed);
  449. FOR_EACH_RANGE (i, 1, numMaps) {
  450. SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
  451. DCHECK(thisMap);
  452. SubMap::destroy(thisMap);
  453. subMaps_[i].store(nullptr, std::memory_order_relaxed);
  454. }
  455. numMapsAllocated_.store(1, std::memory_order_relaxed);
  456. }
  457. // size --
  458. template <
  459. typename KeyT,
  460. typename ValueT,
  461. typename HashFcn,
  462. typename EqualFcn,
  463. typename Allocator,
  464. typename ProbeFcn,
  465. typename KeyConvertFcn>
  466. size_t AtomicHashMap<
  467. KeyT,
  468. ValueT,
  469. HashFcn,
  470. EqualFcn,
  471. Allocator,
  472. ProbeFcn,
  473. KeyConvertFcn>::size() const {
  474. size_t totalSize(0);
  475. int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  476. FOR_EACH_RANGE (i, 0, numMaps) {
  477. totalSize += subMaps_[i].load(std::memory_order_relaxed)->size();
  478. }
  479. return totalSize;
  480. }
  481. // encodeIndex -- Encode the submap index and offset into return.
  482. // index_ret must be pre-populated with the submap offset.
  483. //
  484. // We leave index_ret untouched when referring to the primary map
  485. // so it can be as large as possible (31 data bits). Max size of
  486. // secondary maps is limited by what can fit in the low 27 bits.
  487. //
  488. // Returns the following bit-encoded data in index_ret:
  489. // if subMap == 0 (primary map) =>
  490. // bit(s) value
  491. // 31 0
  492. // 0-30 submap offset (index_ret input)
  493. //
  494. // if subMap > 0 (secondary maps) =>
  495. // bit(s) value
  496. // 31 1
  497. // 27-30 which subMap
  498. // 0-26 subMap offset (index_ret input)
  499. template <
  500. typename KeyT,
  501. typename ValueT,
  502. typename HashFcn,
  503. typename EqualFcn,
  504. typename Allocator,
  505. typename ProbeFcn,
  506. typename KeyConvertFcn>
  507. inline uint32_t AtomicHashMap<
  508. KeyT,
  509. ValueT,
  510. HashFcn,
  511. EqualFcn,
  512. Allocator,
  513. ProbeFcn,
  514. KeyConvertFcn>::encodeIndex(uint32_t subMap, uint32_t offset) {
  515. DCHECK_EQ(offset & kSecondaryMapBit_, 0); // offset can't be too big
  516. if (subMap == 0) {
  517. return offset;
  518. }
  519. // Make sure subMap isn't too big
  520. DCHECK_EQ(subMap >> kNumSubMapBits_, 0);
  521. // Make sure subMap bits of offset are clear
  522. DCHECK_EQ(offset & (~kSubMapIndexMask_ | kSecondaryMapBit_), 0);
  523. // Set high-order bits to encode which submap this index belongs to
  524. return offset | (subMap << kSubMapIndexShift_) | kSecondaryMapBit_;
  525. }
// Iterator implementation
//
// Forward iterator over all elements, walking each submap in turn.
// Built on boost::iterator_facade; an iterator with ahm_ == nullptr is
// the canonical end() sentinel.
template <
    typename KeyT,
    typename ValueT,
    typename HashFcn,
    typename EqualFcn,
    typename Allocator,
    typename ProbeFcn,
    typename KeyConvertFcn>
template <class ContT, class IterVal, class SubIt>
struct AtomicHashMap<
    KeyT,
    ValueT,
    HashFcn,
    EqualFcn,
    Allocator,
    ProbeFcn,
    KeyConvertFcn>::ahm_iterator
    : boost::iterator_facade<
          ahm_iterator<ContT, IterVal, SubIt>,
          IterVal,
          boost::forward_traversal_tag> {
  // Default construction yields the end() iterator (null container).
  explicit ahm_iterator() : ahm_(nullptr) {}
  // Conversion ctor for interoperability between const_iterator and
  // iterator. The enable_if<> magic keeps us well-behaved for
  // is_convertible<> (v. the iterator_facade documentation).
  template <class OtherContT, class OtherVal, class OtherSubIt>
  ahm_iterator(
      const ahm_iterator<OtherContT, OtherVal, OtherSubIt>& o,
      typename std::enable_if<
          std::is_convertible<OtherSubIt, SubIt>::value>::type* = nullptr)
      : ahm_(o.ahm_), subMap_(o.subMap_), subIt_(o.subIt_) {}
  /*
   * Returns the unique index that can be used for access directly
   * into the data storage.
   */
  uint32_t getIndex() const {
    CHECK(!isEnd());
    return ahm_->encodeIndex(subMap_, subIt_.getIndex());
  }

 private:
  friend class AtomicHashMap;
  // Position constructor used by find()/emplace()/begin().
  explicit ahm_iterator(ContT* ahm, uint32_t subMap, const SubIt& subIt)
      : ahm_(ahm), subMap_(subMap), subIt_(subIt) {}
  friend class boost::iterator_core_access;
  void increment() {
    CHECK(!isEnd());
    ++subIt_;
    // May need to hop to the next submap, or collapse into end().
    checkAdvanceToNextSubmap();
  }
  bool equal(const ahm_iterator& other) const {
    if (ahm_ != other.ahm_) {
      // Different containers (or exactly one side is end()) never match.
      return false;
    }
    if (isEnd() || other.isEnd()) {
      return isEnd() == other.isEnd();
    }
    return subMap_ == other.subMap_ && subIt_ == other.subIt_;
  }
  IterVal& dereference() const {
    return *subIt_;
  }
  bool isEnd() const {
    return ahm_ == nullptr;
  }
  // Advance past exhausted submaps; becomes end() when no submaps remain.
  void checkAdvanceToNextSubmap() {
    if (isEnd()) {
      return;
    }
    SubMap* thisMap = ahm_->subMaps_[subMap_].load(std::memory_order_relaxed);
    while (subIt_ == thisMap->end()) {
      // This sub iterator is done, advance to next one
      if (subMap_ + 1 <
          ahm_->numMapsAllocated_.load(std::memory_order_acquire)) {
        ++subMap_;
        thisMap = ahm_->subMaps_[subMap_].load(std::memory_order_relaxed);
        subIt_ = thisMap->begin();
      } else {
        // No more submaps: become the end() sentinel.
        ahm_ = nullptr;
        return;
      }
    }
  }

 private:
  ContT* ahm_; // nullptr <=> this is end()
  uint32_t subMap_; // index of the submap subIt_ currently walks
  SubIt subIt_; // position within that submap
}; // ahm_iterator
  614. } // namespace folly