IndexedMemPoolTest.cpp

/*
 * Copyright 2014-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <folly/IndexedMemPool.h>
#include <folly/portability/GMock.h>
#include <folly/portability/GTest.h>
#include <folly/portability/Semaphore.h>
#include <folly/portability/Unistd.h>
#include <folly/test/DeterministicSchedule.h>

#include <string>
#include <thread>

using namespace folly;
using namespace folly::test;
using namespace testing;
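
// allocElem() hands back a Pool::UniquePtr that recycles its slot when it
// is destroyed: the temporaries in the first loop below are returned to the
// pool immediately, while the `leak` vector holds later allocations alive
// until the pool finally runs out.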
TEST(IndexedMemPool, unique_ptr) {
  typedef IndexedMemPool<size_t> Pool;
  Pool pool(100);

  for (size_t i = 0; i < 100000; ++i) {
    auto ptr = pool.allocElem();
    EXPECT_TRUE(!!ptr);
    *ptr = i;
  }

  std::vector<Pool::UniquePtr> leak;
  while (true) {
    auto ptr = pool.allocElem();
    if (!ptr) {
      // good, we finally ran out
      break;
    }
    leak.emplace_back(std::move(ptr));
    EXPECT_LT(leak.size(), 10000u);
  }
}
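
// Index 0 is reserved as the "null" index, so a successful allocIndex()
// is always >= 1 and (per the EXPECT_LE bounds below) at most
// poolSize + (NumLocalLists - 1) * LocalListLimit, the extra term being
// slack held in per-thread local free lists.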
TEST(IndexedMemPool, no_starvation) {
  const int count = 1000;
  const uint32_t poolSize = 100;

  typedef DeterministicSchedule Sched;
  Sched sched(Sched::uniform(0));

  typedef IndexedMemPool<int, 8, 8, DeterministicAtomic> Pool;
  Pool pool(poolSize);

  for (auto pass = 0; pass < 10; ++pass) {
    int fd[2];
    EXPECT_EQ(pipe(fd), 0);

    // makes sure we wait for available nodes, rather than fail allocIndex
    sem_t allocSem;
    sem_init(&allocSem, 0, poolSize);

    // this semaphore is only needed for deterministic replay, so that we
    // always block in an Sched:: operation rather than in a read() syscall
    sem_t readSem;
    sem_init(&readSem, 0, 0);

    std::thread produce = Sched::thread([&]() {
      for (auto i = 0; i < count; ++i) {
        Sched::wait(&allocSem);
        uint32_t idx = pool.allocIndex();
        EXPECT_NE(idx, 0u);
        EXPECT_LE(
            idx, poolSize + (pool.NumLocalLists - 1) * pool.LocalListLimit);
        pool[idx] = i;
        EXPECT_EQ(write(fd[1], &idx, sizeof(idx)), sizeof(idx));
        Sched::post(&readSem);
      }
    });

    std::thread consume = Sched::thread([&]() {
      for (auto i = 0; i < count; ++i) {
        uint32_t idx;
        Sched::wait(&readSem);
        EXPECT_EQ(read(fd[0], &idx, sizeof(idx)), sizeof(idx));
        EXPECT_NE(idx, 0u);
        EXPECT_GE(idx, 1u);
        EXPECT_LE(
            idx, poolSize + (Pool::NumLocalLists - 1) * Pool::LocalListLimit);
        EXPECT_EQ(pool[idx], i);
        pool.recycleIndex(idx);
        Sched::post(&allocSem);
      }
    });

    Sched::join(produce);
    Sched::join(consume);
    close(fd[0]);
    close(fd[1]);
  }
}
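
// allocIndex() signals exhaustion by returning the reserved index 0,
// which is what the EXPECT_EQ(pool.allocIndex(), 0u) checks below rely on.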
TEST(IndexedMemPool, st_capacity) {
  // only one local list => capacity is exact
  typedef IndexedMemPool<int, 1, 32> Pool;
  Pool pool(10);

  EXPECT_EQ(pool.capacity(), 10u);
  EXPECT_EQ(Pool::maxIndexForCapacity(10), 10u);
  for (auto i = 0; i < 10; ++i) {
    EXPECT_NE(pool.allocIndex(), 0u);
  }
  EXPECT_EQ(pool.allocIndex(), 0u);
}
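
// With 16 local lists the pool may serve more than the requested 1000
// indices; the slack is bounded by (NumLocalLists - 1) * LocalListLimit
// (see the bounds asserted in no_starvation), so 16 * 32 extra
// allocations are enough to drain it before expecting failure.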
TEST(IndexedMemPool, mt_capacity) {
  typedef IndexedMemPool<int, 16, 32> Pool;
  Pool pool(1000);

  std::thread threads[10];
  for (auto i = 0; i < 10; ++i) {
    threads[i] = std::thread([&]() {
      for (auto j = 0; j < 100; ++j) {
        uint32_t idx = pool.allocIndex();
        EXPECT_NE(idx, 0u);
      }
    });
  }

  for (auto i = 0; i < 10; ++i) {
    threads[i].join();
  }

  for (auto i = 0; i < 16 * 32; ++i) {
    pool.allocIndex();
  }
  EXPECT_EQ(pool.allocIndex(), 0u);
}
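
// locateElem() maps an element pointer back to its pool index, and maps
// nullptr to the reserved index 0.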
TEST(IndexedMemPool, locate_elem) {
  IndexedMemPool<int> pool(1000);

  for (auto i = 0; i < 1000; ++i) {
    auto idx = pool.allocIndex();
    EXPECT_FALSE(idx == 0);
    int* elem = &pool[idx];
    EXPECT_TRUE(idx == pool.locateElem(elem));
  }

  EXPECT_EQ(pool.locateElem(nullptr), 0);
}
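
// Tracks live instances in a thread-local counter so the recycle-policy
// tests below can observe exactly when elements are constructed and
// destroyed.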
struct NonTrivialStruct {
  static FOLLY_TLS size_t count;

  size_t elem_;

  NonTrivialStruct() {
    elem_ = 0;
    ++count;
  }

  NonTrivialStruct(std::unique_ptr<std::string>&& arg1, size_t arg2) {
    elem_ = arg1->length() + arg2;
    ++count;
  }

  ~NonTrivialStruct() {
    --count;
  }
};

FOLLY_TLS size_t NonTrivialStruct::count;
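
// With the default (eager-recycle) traits, allocElem() constructs the
// element in place, forwarding its arguments, and the element is destroyed
// as soon as the UniquePtr goes out of scope, so the live count drops back
// to zero after every iteration.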
TEST(IndexedMemPool, eager_recycle) {
  typedef IndexedMemPool<NonTrivialStruct> Pool;
  Pool pool(100);

  EXPECT_EQ(NonTrivialStruct::count, 0);

  for (size_t i = 0; i < 10; ++i) {
    {
      std::unique_ptr<std::string> arg{new std::string{"abc"}};
      auto ptr = pool.allocElem(std::move(arg), 100);
      EXPECT_EQ(NonTrivialStruct::count, 1);
      EXPECT_EQ(ptr->elem_, 103);
      EXPECT_TRUE(!!ptr);
    }
    EXPECT_EQ(NonTrivialStruct::count, 0);
  }
}
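
// With IndexedMemPoolTraitsLazyRecycle, elements stay constructed across
// recycles and are destroyed only when the pool itself is destroyed, which
// is why the count remains positive inside the scope and reaches zero only
// after the pool goes away.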
TEST(IndexedMemPool, late_recycle) {
  {
    using Pool = IndexedMemPool<
        NonTrivialStruct,
        8,
        8,
        std::atomic,
        IndexedMemPoolTraitsLazyRecycle<NonTrivialStruct>>;
    Pool pool(100);

    EXPECT_EQ(NonTrivialStruct::count, 0);

    for (size_t i = 0; i < 10; ++i) {
      {
        auto ptr = pool.allocElem();
        EXPECT_TRUE(NonTrivialStruct::count > 0);
        EXPECT_TRUE(!!ptr);
        ptr->elem_ = i;
      }
      EXPECT_TRUE(NonTrivialStruct::count > 0);
    }
  }
  EXPECT_EQ(NonTrivialStruct::count, 0);
}
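
// Hammers allocIndex()/recycleIndex() from ten threads at once; the test
// has no explicit synchronization and is presumably most useful when run
// under a race detector such as TSAN.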
TEST(IndexedMemPool, no_data_races) {
  const int count = 1000;
  const uint32_t poolSize = 100;
  const int nthreads = 10;

  using Pool = IndexedMemPool<int, 8, 8>;
  Pool pool(poolSize);

  std::vector<std::thread> thr(nthreads);
  for (auto i = 0; i < nthreads; ++i) {
    thr[i] = std::thread([&]() {
      for (auto j = 0; j < count; ++j) {
        uint32_t idx = pool.allocIndex();
        EXPECT_NE(idx, 0u);
        EXPECT_LE(
            idx, poolSize + (pool.NumLocalLists - 1) * pool.LocalListLimit);
        pool[idx] = j;
        pool.recycleIndex(idx);
      }
    });
  }
  for (auto& t : thr) {
    t.join();
  }
}
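
// Global counters for the balance check below: every Foo the pool
// constructs must eventually be destroyed, even when a capacity-2 pool
// is hammered by 20 threads racing to allocate and recycle.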
std::atomic<int> cnum{0};
std::atomic<int> dnum{0};

TEST(IndexedMemPool, construction_destruction) {
  struct Foo {
    Foo() {
      cnum.fetch_add(1);
    }
    ~Foo() {
      dnum.fetch_add(1);
    }
  };

  std::atomic<bool> start{false};
  std::atomic<int> started{0};

  using Pool = IndexedMemPool<
      Foo,
      1,
      1,
      std::atomic,
      IndexedMemPoolTraitsLazyRecycle<Foo>>;
  int nthreads = 20;
  int count = 1000;

  {
    Pool pool(2);
    std::vector<std::thread> thr(nthreads);
    for (auto i = 0; i < nthreads; ++i) {
      thr[i] = std::thread([&]() {
        started.fetch_add(1);
        while (!start.load()) {
          ;
        }
        for (auto j = 0; j < count; ++j) {
          uint32_t idx = pool.allocIndex();
          if (idx != 0) {
            pool.recycleIndex(idx);
          }
        }
      });
    }
    while (started.load() < nthreads) {
      ;
    }
    start.store(true);
    for (auto& t : thr) {
      t.join();
    }
  }

  CHECK_EQ(cnum.load(), dnum.load());
}

/// Global Traits mock. It can't be a regular (non-global) mock because we
/// don't have access to the instance.
struct MockTraits {
  static MockTraits* instance;

  MockTraits() {
    instance = this;
  }

  ~MockTraits() {
    instance = nullptr;
  }

  MOCK_METHOD2(onAllocate, void(std::string*, std::string));
  MOCK_METHOD1(onRecycle, void(std::string*));

  struct Forwarder {
    static void initialize(std::string* ptr) {
      new (ptr) std::string();
    }

    static void cleanup(std::string* ptr) {
      using std::string;
      ptr->~string();
    }

    static void onAllocate(std::string* ptr, std::string s) {
      instance->onAllocate(ptr, s);
    }

    static void onRecycle(std::string* ptr) {
      instance->onRecycle(ptr);
    }
  };
};

MockTraits* MockTraits::instance;
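
// A pool whose traits hooks are routed (via Forwarder) to the live
// MockTraits instance. LocalListLimit is 1, so a single recycled element
// fills the local free list and the next recycle spills to the global
// list (see use_traits_global_list below).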
using TraitsTestPool =
    IndexedMemPool<std::string, 1, 1, std::atomic, MockTraits::Forwarder>;
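
// Runs one allocate/recycle round trip through the pool and verifies that
// onAllocate and onRecycle fire on the element while its slot is not
// marked as allocated.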
void testTraits(TraitsTestPool& pool) {
  MockTraits traits;
  const std::string* elem = nullptr;
  EXPECT_CALL(traits, onAllocate(_, _))
      .WillOnce(Invoke([&](std::string* s, auto) {
        EXPECT_FALSE(pool.isAllocated(pool.locateElem(s)));
        elem = s;
      }));
  std::string* ptr = pool.allocElem("foo").release();
  EXPECT_EQ(ptr, elem);

  elem = nullptr;
  EXPECT_CALL(traits, onRecycle(_)).WillOnce(Invoke([&](std::string* s) {
    EXPECT_FALSE(pool.isAllocated(pool.locateElem(s)));
    elem = s;
  }));
  pool.recycleIndex(pool.locateElem(ptr));
  EXPECT_EQ(ptr, elem);
}

// Test that Traits is used when both local and global lists are empty.
TEST(IndexedMemPool, use_traits_empty) {
  TraitsTestPool pool(10);
  testTraits(pool);
}

// Test that Traits is used when allocating from a local list.
TEST(IndexedMemPool, use_traits_local_list) {
  TraitsTestPool pool(10);
  MockTraits traits;
  EXPECT_CALL(traits, onAllocate(_, _));
  // Allocate and immediately recycle an element to populate the local list.
  pool.allocElem("");
  testTraits(pool);
}

// Test that Traits is used when allocating from a global list.
TEST(IndexedMemPool, use_traits_global_list) {
  TraitsTestPool pool(10);
  MockTraits traits;
  EXPECT_CALL(traits, onAllocate(_, _)).Times(2);
  auto global = pool.allocElem("");
  // Allocate and immediately recycle an element to fill the local list.
  pool.allocElem("");
  // Recycle an element to populate the global list.
  global.reset();
  testTraits(pool);
}

// Test that IndexedMemPool works with incomplete element types.
struct IncompleteTestElement;
using IncompleteTestPool = IndexedMemPool<IncompleteTestElement>;