// SynchronizedTestLib-inl.h

/*
 * Copyright 2012-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <folly/Random.h>
#include <folly/Synchronized.h>
#include <folly/container/Foreach.h>
#include <folly/portability/GTest.h>

#include <glog/logging.h>

#include <algorithm>
#include <chrono>
#include <condition_variable>
#include <functional>
#include <map>
#include <random>
#include <thread>
#include <vector>

namespace folly {
namespace sync_tests {

inline std::mt19937& getRNG() {
  static const auto seed = folly::randomNumberSeed();
  static std::mt19937 rng(seed);
  return rng;
}

inline void randomSleep(
    std::chrono::milliseconds min,
    std::chrono::milliseconds max) {
  std::uniform_int_distribution<> range(min.count(), max.count());
  std::chrono::milliseconds duration(range(getRNG()));
  /* sleep override */
  std::this_thread::sleep_for(duration);
}

/*
 * Run a function simultaneously in a number of different threads.
 *
 * The function will be passed the index number of the thread it is running in.
 * This function makes an attempt to synchronize the start of the threads as
 * closely as possible.  It waits for all threads to be allocated and started
 * before invoking the function.
 */
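/*
 * Example usage (an illustrative sketch only, not part of the tests below):
 * bump a shared counter from several threads that all start at roughly the
 * same time.
 *
 *   folly::Synchronized<int> counter{0};
 *   runParallel(8, [&](size_t threadIndex) {
 *     (void)threadIndex;
 *     ++(*counter.wlock());
 *   });
 *   EXPECT_EQ(8, *counter.rlock());
 */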
template <class Function>
void runParallel(size_t numThreads, const Function& function) {
  std::vector<std::thread> threads;
  threads.reserve(numThreads);

  // Variables used to synchronize all threads to try and start them
  // as close to the same time as possible
  folly::Synchronized<size_t, std::mutex> threadsReady(0);
  std::condition_variable readyCV;
  folly::Synchronized<bool, std::mutex> go(false);
  std::condition_variable goCV;

  auto worker = [&](size_t threadIndex) {
    // Signal that we are ready
    ++(*threadsReady.lock());
    readyCV.notify_one();

    // Wait until we are given the signal to start
    // The purpose of this is to try and make sure all threads start
    // as close to the same time as possible.
    {
      auto lockedGo = go.lock();
      goCV.wait(lockedGo.getUniqueLock(), [&] { return *lockedGo; });
    }

    function(threadIndex);
  };

  // Start all of the threads
  for (size_t threadIndex = 0; threadIndex < numThreads; ++threadIndex) {
    threads.emplace_back([threadIndex, &worker]() { worker(threadIndex); });
  }

  // Wait for all threads to become ready
  {
    auto readyLocked = threadsReady.lock();
    readyCV.wait(readyLocked.getUniqueLock(), [&] {
      return *readyLocked == numThreads;
    });
  }
  // Now signal the threads that they can go
  go = true;
  goCV.notify_all();

  // Wait for all threads to finish
  for (auto& thread : threads) {
    thread.join();
  }
}

// testBasic() version for shared lock types
template <class Mutex>
typename std::enable_if<folly::LockTraits<Mutex>::is_shared>::type
testBasicImpl() {
  folly::Synchronized<std::vector<int>, Mutex> obj;
  const auto& constObj = obj;

  obj.wlock()->resize(1000);

  folly::Synchronized<std::vector<int>, Mutex> obj2{*obj.wlock()};
  EXPECT_EQ(1000, obj2.rlock()->size());

  {
    auto lockedObj = obj.wlock();
    lockedObj->push_back(10);
    EXPECT_EQ(1001, lockedObj->size());
    EXPECT_EQ(10, lockedObj->back());
    EXPECT_EQ(1000, obj2.wlock()->size());
    EXPECT_EQ(1000, obj2.rlock()->size());

    {
      auto unlocker = lockedObj.scopedUnlock();
      EXPECT_EQ(1001, obj.wlock()->size());
    }
  }

  {
    auto lockedObj = obj.rlock();
    EXPECT_EQ(1001, lockedObj->size());
    EXPECT_EQ(1001, obj.rlock()->size());
    {
      auto unlocker = lockedObj.scopedUnlock();
      EXPECT_EQ(1001, obj.wlock()->size());
    }
  }

  obj.wlock()->front() = 2;

  {
    // contextualLock() on a const reference should grab a shared lock
    auto lockedObj = constObj.contextualLock();
    EXPECT_EQ(2, lockedObj->front());
    EXPECT_EQ(2, constObj.rlock()->front());
    EXPECT_EQ(2, obj.rlock()->front());
  }

  EXPECT_EQ(1001, obj.rlock()->size());
  EXPECT_EQ(2, obj.rlock()->front());
  EXPECT_EQ(10, obj.rlock()->back());
  EXPECT_EQ(1000, obj2.rlock()->size());
}

// testBasic() version for non-shared lock types
template <class Mutex>
typename std::enable_if<!folly::LockTraits<Mutex>::is_shared>::type
testBasicImpl() {
  folly::Synchronized<std::vector<int>, Mutex> obj;
  const auto& constObj = obj;

  obj.lock()->resize(1000);

  folly::Synchronized<std::vector<int>, Mutex> obj2{*obj.lock()};
  EXPECT_EQ(1000, obj2.lock()->size());

  {
    auto lockedObj = obj.lock();
    lockedObj->push_back(10);
    EXPECT_EQ(1001, lockedObj->size());
    EXPECT_EQ(10, lockedObj->back());
    EXPECT_EQ(1000, obj2.lock()->size());

    {
      auto unlocker = lockedObj.scopedUnlock();
      EXPECT_EQ(1001, obj.lock()->size());
    }
  }

  {
    auto lockedObj = constObj.lock();
    EXPECT_EQ(1001, lockedObj->size());
    EXPECT_EQ(10, lockedObj->back());
    EXPECT_EQ(1000, obj2.lock()->size());
  }

  obj.lock()->front() = 2;

  EXPECT_EQ(1001, obj.lock()->size());
  EXPECT_EQ(2, obj.lock()->front());
  EXPECT_EQ(2, obj.contextualLock()->front());
  EXPECT_EQ(10, obj.lock()->back());
  EXPECT_EQ(1000, obj2.lock()->size());
}

template <class Mutex>
void testBasic() {
  testBasicImpl<Mutex>();
}

// testWithLock() version for shared lock types
template <class Mutex>
typename std::enable_if<folly::LockTraits<Mutex>::is_shared>::type
testWithLock() {
  folly::Synchronized<std::vector<int>, Mutex> obj;
  const auto& constObj = obj;

  // Test withWLock() and withRLock()
  obj.withWLock([](std::vector<int>& lockedObj) {
    lockedObj.resize(1000);
    lockedObj.push_back(10);
    lockedObj.push_back(11);
  });
  obj.withWLock([](const std::vector<int>& lockedObj) {
    EXPECT_EQ(1002, lockedObj.size());
  });
  obj.withRLock([](const std::vector<int>& lockedObj) {
    EXPECT_EQ(1002, lockedObj.size());
    EXPECT_EQ(11, lockedObj.back());
  });
  constObj.withRLock([](const std::vector<int>& lockedObj) {
    EXPECT_EQ(1002, lockedObj.size());
  });

#if __cpp_generic_lambdas >= 201304
  obj.withWLock([](auto& lockedObj) { lockedObj.push_back(12); });
  obj.withWLock(
      [](const auto& lockedObj) { EXPECT_EQ(1003, lockedObj.size()); });
  obj.withRLock([](const auto& lockedObj) {
    EXPECT_EQ(1003, lockedObj.size());
    EXPECT_EQ(12, lockedObj.back());
  });
  constObj.withRLock(
      [](const auto& lockedObj) { EXPECT_EQ(1003, lockedObj.size()); });
  obj.withWLock([](auto& lockedObj) { lockedObj.pop_back(); });
#endif

  // Test withWLockPtr() and withRLockPtr()
  using SynchType = folly::Synchronized<std::vector<int>, Mutex>;
#if __cpp_generic_lambdas >= 201304
  obj.withWLockPtr([](auto&& lockedObj) { lockedObj->push_back(13); });
  obj.withRLockPtr([](auto&& lockedObj) {
    EXPECT_EQ(1003, lockedObj->size());
    EXPECT_EQ(13, lockedObj->back());
  });
  constObj.withRLockPtr([](auto&& lockedObj) {
    EXPECT_EQ(1003, lockedObj->size());
    EXPECT_EQ(13, lockedObj->back());
  });
  obj.withWLockPtr([&](auto&& lockedObj) {
    lockedObj->push_back(14);
    {
      auto unlocker = lockedObj.scopedUnlock();
      obj.wlock()->push_back(15);
    }
    EXPECT_EQ(15, lockedObj->back());
  });
#else
  obj.withWLockPtr([](typename SynchType::LockedPtr&& lockedObj) {
    lockedObj->push_back(13);
    lockedObj->push_back(14);
    lockedObj->push_back(15);
  });
#endif
  obj.withWLockPtr([](typename SynchType::LockedPtr&& lockedObj) {
    lockedObj->push_back(16);
    EXPECT_EQ(1006, lockedObj->size());
  });
  obj.withRLockPtr([](typename SynchType::ConstLockedPtr&& lockedObj) {
    EXPECT_EQ(1006, lockedObj->size());
    EXPECT_EQ(16, lockedObj->back());
  });
  constObj.withRLockPtr([](typename SynchType::ConstLockedPtr&& lockedObj) {
    EXPECT_EQ(1006, lockedObj->size());
    EXPECT_EQ(16, lockedObj->back());
  });
}

// testWithLock() version for non-shared lock types
template <class Mutex>
typename std::enable_if<!folly::LockTraits<Mutex>::is_shared>::type
testWithLock() {
  folly::Synchronized<std::vector<int>, Mutex> obj;

  // Test withLock()
  obj.withLock([](std::vector<int>& lockedObj) {
    lockedObj.resize(1000);
    lockedObj.push_back(10);
    lockedObj.push_back(11);
  });
  obj.withLock([](const std::vector<int>& lockedObj) {
    EXPECT_EQ(1002, lockedObj.size());
  });

#if __cpp_generic_lambdas >= 201304
  obj.withLock([](auto& lockedObj) { lockedObj.push_back(12); });
  obj.withLock(
      [](const auto& lockedObj) { EXPECT_EQ(1003, lockedObj.size()); });
  obj.withLock([](auto& lockedObj) { lockedObj.pop_back(); });
#endif

  // Test withLockPtr()
  using SynchType = folly::Synchronized<std::vector<int>, Mutex>;
#if __cpp_generic_lambdas >= 201304
  obj.withLockPtr([](auto&& lockedObj) { lockedObj->push_back(13); });
  obj.withLockPtr([](auto&& lockedObj) {
    EXPECT_EQ(1003, lockedObj->size());
    EXPECT_EQ(13, lockedObj->back());
  });
  obj.withLockPtr([&](auto&& lockedObj) {
    lockedObj->push_back(14);
    {
      auto unlocker = lockedObj.scopedUnlock();
      obj.lock()->push_back(15);
    }
    EXPECT_EQ(1005, lockedObj->size());
    EXPECT_EQ(15, lockedObj->back());
  });
#else
  obj.withLockPtr([](typename SynchType::LockedPtr&& lockedObj) {
    lockedObj->push_back(13);
    lockedObj->push_back(14);
    lockedObj->push_back(15);
  });
#endif
  obj.withLockPtr([](typename SynchType::LockedPtr&& lockedObj) {
    lockedObj->push_back(16);
    EXPECT_EQ(1006, lockedObj->size());
  });
  const auto& constObj = obj;
  constObj.withLockPtr([](typename SynchType::ConstLockedPtr&& lockedObj) {
    EXPECT_EQ(1006, lockedObj->size());
    EXPECT_EQ(16, lockedObj->back());
  });
}

template <class Mutex>
void testUnlockCommon() {
  folly::Synchronized<int, Mutex> value{7};
  const auto& cv = value;
  {
    auto lv = value.contextualLock();
    EXPECT_EQ(7, *lv);
    *lv = 5;
    lv.unlock();
    EXPECT_TRUE(lv.isNull());
    EXPECT_FALSE(lv);

    auto rlv = cv.contextualLock();
    EXPECT_EQ(5, *rlv);
    rlv.unlock();
    EXPECT_TRUE(rlv.isNull());
    EXPECT_FALSE(rlv);

    auto rlv2 = cv.contextualRLock();
    EXPECT_EQ(5, *rlv2);
    rlv2.unlock();

    lv = value.contextualLock();
    EXPECT_EQ(5, *lv);
    *lv = 9;
  }
  EXPECT_EQ(9, *value.contextualRLock());
}

// testUnlock() version for shared lock types
template <class Mutex>
typename std::enable_if<folly::LockTraits<Mutex>::is_shared>::type
testUnlock() {
  folly::Synchronized<int, Mutex> value{10};
  {
    auto lv = value.wlock();
    EXPECT_EQ(10, *lv);
    *lv = 5;
    lv.unlock();
    EXPECT_FALSE(lv);
    EXPECT_TRUE(lv.isNull());

    auto rlv = value.rlock();
    EXPECT_EQ(5, *rlv);
    rlv.unlock();
    EXPECT_FALSE(rlv);
    EXPECT_TRUE(rlv.isNull());

    auto lv2 = value.wlock();
    EXPECT_EQ(5, *lv2);
    *lv2 = 7;

    lv = std::move(lv2);
    EXPECT_FALSE(lv2);
    EXPECT_TRUE(lv2.isNull());
    EXPECT_FALSE(lv.isNull());
    EXPECT_EQ(7, *lv);
  }
  testUnlockCommon<Mutex>();
}

// testUnlock() version for non-shared lock types
template <class Mutex>
typename std::enable_if<!folly::LockTraits<Mutex>::is_shared>::type
testUnlock() {
  folly::Synchronized<int, Mutex> value{10};
  {
    auto lv = value.lock();
    EXPECT_EQ(10, *lv);
    *lv = 5;
    lv.unlock();
    EXPECT_TRUE(lv.isNull());
    EXPECT_FALSE(lv);

    auto lv2 = value.lock();
    EXPECT_EQ(5, *lv2);
    *lv2 = 6;
    lv2.unlock();
    EXPECT_TRUE(lv2.isNull());
    EXPECT_FALSE(lv2);

    lv = value.lock();
    EXPECT_EQ(6, *lv);
    *lv = 7;

    lv2 = std::move(lv);
    EXPECT_TRUE(lv.isNull());
    EXPECT_FALSE(lv);
    EXPECT_FALSE(lv2.isNull());
    EXPECT_EQ(7, *lv2);
  }
  testUnlockCommon<Mutex>();
}

// Testing the deprecated SYNCHRONIZED and SYNCHRONIZED_CONST APIs
template <class Mutex>
void testDeprecated() {
  folly::Synchronized<std::vector<int>, Mutex> obj;

  obj->resize(1000);

  auto obj2 = obj;
  EXPECT_EQ(1000, obj2->size());

  SYNCHRONIZED(obj) {
    obj.push_back(10);
    EXPECT_EQ(1001, obj.size());
    EXPECT_EQ(10, obj.back());
    EXPECT_EQ(1000, obj2->size());
  }

  SYNCHRONIZED_CONST(obj) {
    EXPECT_EQ(1001, obj.size());
  }

  SYNCHRONIZED(lockedObj, *&obj) {
    lockedObj.front() = 2;
  }

  EXPECT_EQ(1001, obj->size());
  EXPECT_EQ(10, obj->back());
  EXPECT_EQ(1000, obj2->size());

  EXPECT_EQ(FB_ARG_2_OR_1(1, 2), 2);
  EXPECT_EQ(FB_ARG_2_OR_1(1), 1);
}

template <class Mutex>
void testConcurrency() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  static const size_t numThreads = 100;
  // Note: I initially tried using itersPerThread = 1000,
  // which works fine for most lock types, but std::shared_timed_mutex
  // appears to be extraordinarily slow.  It could take around 30 seconds
  // to run this test with 1000 iterations per thread using shared_timed_mutex.
  static const size_t itersPerThread = 100;

  auto pushNumbers = [&](size_t threadIdx) {
    // Test lock()
    for (size_t n = 0; n < itersPerThread; ++n) {
      v.contextualLock()->push_back((itersPerThread * threadIdx) + n);
      std::this_thread::yield();
    }
  };
  runParallel(numThreads, pushNumbers);

  std::vector<int> result;
  v.swap(result);

  EXPECT_EQ(numThreads * itersPerThread, result.size());
  std::sort(result.begin(), result.end());

  for (size_t i = 0; i < itersPerThread * numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
}

template <class Mutex>
void testAcquireLocked() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<std::map<int, int>, Mutex> m;

  auto dualLockWorker = [&](size_t threadIdx) {
    // Note: this will be less awkward with C++17's structured binding
    // functionality, which makes it easier to use the returned std::tuple;
    // see the sketch just below.
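    //
    // An illustrative C++17 sketch (not used here, so this file keeps
    // building with pre-C++17 compilers; the names lockedVec and lockedMap
    // are made up for the example):
    //
    //   auto [lockedVec, lockedMap] = acquireLocked(v, m);
    //   lockedVec->push_back(threadIdx);
    //   (*lockedMap)[threadIdx] = threadIdx + 1;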
    if (threadIdx & 1) {
      auto ret = acquireLocked(v, m);
      std::get<0>(ret)->push_back(threadIdx);
      (*std::get<1>(ret))[threadIdx] = threadIdx + 1;
    } else {
      auto ret = acquireLocked(m, v);
      std::get<1>(ret)->push_back(threadIdx);
      (*std::get<0>(ret))[threadIdx] = threadIdx + 1;
    }
  };
  static const size_t numThreads = 100;
  runParallel(numThreads, dualLockWorker);

  std::vector<int> result;
  v.swap(result);

  EXPECT_EQ(numThreads, result.size());
  std::sort(result.begin(), result.end());

  for (size_t i = 0; i < numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
}

template <class Mutex>
void testAcquireLockedWithConst() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<std::map<int, int>, Mutex> m;

  auto dualLockWorker = [&](size_t threadIdx) {
    const auto& cm = m;
    if (threadIdx & 1) {
      auto ret = acquireLocked(v, cm);
      (void)std::get<1>(ret)->size();
      std::get<0>(ret)->push_back(threadIdx);
    } else {
      auto ret = acquireLocked(cm, v);
      (void)std::get<0>(ret)->size();
      std::get<1>(ret)->push_back(threadIdx);
    }
  };
  static const size_t numThreads = 100;
  runParallel(numThreads, dualLockWorker);

  std::vector<int> result;
  v.swap(result);

  EXPECT_EQ(numThreads, result.size());
  std::sort(result.begin(), result.end());

  for (size_t i = 0; i < numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
}

// Testing the deprecated SYNCHRONIZED_DUAL API
template <class Mutex>
void testDualLocking() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<std::map<int, int>, Mutex> m;

  auto dualLockWorker = [&](size_t threadIdx) {
    if (threadIdx & 1) {
      SYNCHRONIZED_DUAL(lv, v, lm, m) {
        lv.push_back(threadIdx);
        lm[threadIdx] = threadIdx + 1;
      }
    } else {
      SYNCHRONIZED_DUAL(lm, m, lv, v) {
        lv.push_back(threadIdx);
        lm[threadIdx] = threadIdx + 1;
      }
    }
  };
  static const size_t numThreads = 100;
  runParallel(numThreads, dualLockWorker);

  std::vector<int> result;
  v.swap(result);

  EXPECT_EQ(numThreads, result.size());
  std::sort(result.begin(), result.end());

  for (size_t i = 0; i < numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
}

// Testing the deprecated SYNCHRONIZED_DUAL API
template <class Mutex>
void testDualLockingWithConst() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<std::map<int, int>, Mutex> m;

  auto dualLockWorker = [&](size_t threadIdx) {
    const auto& cm = m;
    if (threadIdx & 1) {
      SYNCHRONIZED_DUAL(lv, v, lm, cm) {
        (void)lm.size();
        lv.push_back(threadIdx);
      }
    } else {
      SYNCHRONIZED_DUAL(lm, cm, lv, v) {
        (void)lm.size();
        lv.push_back(threadIdx);
      }
    }
  };
  static const size_t numThreads = 100;
  runParallel(numThreads, dualLockWorker);

  std::vector<int> result;
  v.swap(result);

  EXPECT_EQ(numThreads, result.size());
  std::sort(result.begin(), result.end());

  for (size_t i = 0; i < numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
}

template <class Mutex>
void testTimed() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<uint64_t, Mutex> numTimeouts;

  auto worker = [&](size_t threadIdx) {
    // Test directly using operator-> on the lock result
    v.contextualLock()->push_back(2 * threadIdx);

    // Test using lock with a timeout
    for (;;) {
      auto lv = v.contextualLock(std::chrono::milliseconds(5));
      if (!lv) {
        ++(*numTimeouts.contextualLock());
        continue;
      }

      // Sleep for a random time to ensure we trigger timeouts
      // in other threads
      randomSleep(std::chrono::milliseconds(5), std::chrono::milliseconds(15));
      lv->push_back(2 * threadIdx + 1);
      break;
    }
  };

  static const size_t numThreads = 100;
  runParallel(numThreads, worker);

  std::vector<int> result;
  v.swap(result);

  EXPECT_EQ(2 * numThreads, result.size());
  std::sort(result.begin(), result.end());
  for (size_t i = 0; i < 2 * numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
  // We generally expect a large number of timeouts here.
  // I'm not adding a check for it since it's theoretically possible that
  // we might get 0 timeouts depending on the CPU scheduling if our threads
  // don't get to run very often.
  LOG(INFO) << "testTimed: " << *numTimeouts.contextualRLock() << " timeouts";

  // Make sure we can lock with various timeout duration units
  {
    auto lv = v.contextualLock(std::chrono::milliseconds(5));
    EXPECT_TRUE(bool(lv));
    EXPECT_FALSE(lv.isNull());
    auto lv2 = v.contextualLock(std::chrono::microseconds(5));
    // We may or may not acquire lv2 successfully, depending on whether
    // or not this is a recursive mutex type.
  }
  {
    auto lv = v.contextualLock(std::chrono::seconds(1));
    EXPECT_TRUE(bool(lv));
  }
}

template <class Mutex>
void testTimedShared() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<uint64_t, Mutex> numTimeouts;

  auto worker = [&](size_t threadIdx) {
    // Test directly using operator-> on the lock result
    v.wlock()->push_back(threadIdx);

    // Test lock() with a timeout
    for (;;) {
      auto lv = v.rlock(std::chrono::milliseconds(10));
      if (!lv) {
        ++(*numTimeouts.contextualLock());
        continue;
      }

      // Sleep while holding the lock.
      //
      // This will block other threads from acquiring the write lock to add
      // their thread index to v, but it won't block threads that have entered
      // the for loop and are trying to acquire a read lock.
      //
      // For lock types that give preference to readers rather than writers,
      // this will tend to serialize all threads on the wlock() above.
      randomSleep(std::chrono::milliseconds(5), std::chrono::milliseconds(15));
      auto found = std::find(lv->begin(), lv->end(), threadIdx);
      CHECK(found != lv->end());
      break;
    }
  };

  static const size_t numThreads = 100;
  runParallel(numThreads, worker);

  std::vector<int> result;
  v.swap(result);

  EXPECT_EQ(numThreads, result.size());
  std::sort(result.begin(), result.end());
  for (size_t i = 0; i < numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
  // We generally expect a small number of timeouts here.
  // For locks that give readers preference over writers this should usually
  // be 0.  With locks that give writers preference we do see a small-ish
  // number of read timeouts.
  LOG(INFO) << "testTimedShared: " << *numTimeouts.contextualRLock()
            << " timeouts";
}

// Testing the deprecated TIMED_SYNCHRONIZED API
template <class Mutex>
void testTimedSynchronized() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<uint64_t, Mutex> numTimeouts;

  auto worker = [&](size_t threadIdx) {
    // Test operator->
    v->push_back(2 * threadIdx);

    // Aaand test the TIMED_SYNCHRONIZED macro
    for (;;) {
      TIMED_SYNCHRONIZED(5, lv, v) {
        if (lv) {
          // Sleep for a random time to ensure we trigger timeouts
          // in other threads
          randomSleep(
              std::chrono::milliseconds(5), std::chrono::milliseconds(15));
          lv->push_back(2 * threadIdx + 1);
          return;
        }

        ++(*numTimeouts.contextualLock());
      }
    }
  };

  static const size_t numThreads = 100;
  runParallel(numThreads, worker);

  std::vector<int> result;
  v.swap(result);

  EXPECT_EQ(2 * numThreads, result.size());
  std::sort(result.begin(), result.end());
  for (size_t i = 0; i < 2 * numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
  // We generally expect a large number of timeouts here.
  // I'm not adding a check for it since it's theoretically possible that
  // we might get 0 timeouts depending on the CPU scheduling if our threads
  // don't get to run very often.
  LOG(INFO) << "testTimedSynchronized: " << *numTimeouts.contextualRLock()
            << " timeouts";
}

// Testing the deprecated TIMED_SYNCHRONIZED_CONST API
template <class Mutex>
void testTimedSynchronizedWithConst() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<uint64_t, Mutex> numTimeouts;

  auto worker = [&](size_t threadIdx) {
    // Test operator->
    v->push_back(threadIdx);

    // Test TIMED_SYNCHRONIZED_CONST
    for (;;) {
      TIMED_SYNCHRONIZED_CONST(10, lv, v) {
        if (lv) {
          // Sleep while holding the lock.
          //
          // This will block other threads from acquiring the write lock to
          // add their thread index to v, but it won't block threads that have
          // entered the for loop and are trying to acquire a read lock.
          //
          // For lock types that give preference to readers rather than
          // writers, this will tend to serialize all threads on the write
          // lock acquired via operator-> above.
          randomSleep(
              std::chrono::milliseconds(5), std::chrono::milliseconds(15));
          auto found = std::find(lv->begin(), lv->end(), threadIdx);
          CHECK(found != lv->end());
          return;
        } else {
          ++(*numTimeouts.contextualLock());
        }
      }
    }
  };

  static const size_t numThreads = 100;
  runParallel(numThreads, worker);

  std::vector<int> result;
  v.swap(result);

  EXPECT_EQ(numThreads, result.size());
  std::sort(result.begin(), result.end());
  for (size_t i = 0; i < numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
  // We generally expect a small number of timeouts here.
  // For locks that give readers preference over writers this should usually
  // be 0.  With locks that give writers preference we do see a small-ish
  // number of read timeouts.
  LOG(INFO) << "testTimedSynchronizedWithConst: "
            << *numTimeouts.contextualRLock() << " timeouts";
}

template <class Mutex>
void testConstCopy() {
  std::vector<int> input = {1, 2, 3};
  const folly::Synchronized<std::vector<int>, Mutex> v(input);

  std::vector<int> result;
  v.copy(&result);
  EXPECT_EQ(input, result);

  result = v.copy();
  EXPECT_EQ(input, result);
}

struct NotCopiableNotMovable {
  NotCopiableNotMovable(int, const char*) {}
  NotCopiableNotMovable(const NotCopiableNotMovable&) = delete;
  NotCopiableNotMovable& operator=(const NotCopiableNotMovable&) = delete;
  NotCopiableNotMovable(NotCopiableNotMovable&&) = delete;
  NotCopiableNotMovable& operator=(NotCopiableNotMovable&&) = delete;
};

template <class Mutex>
void testInPlaceConstruction() {
  // This won't compile without in_place
  folly::Synchronized<NotCopiableNotMovable> a(folly::in_place, 5, "a");
}

template <class Mutex>
void testExchange() {
  std::vector<int> input = {1, 2, 3};
  folly::Synchronized<std::vector<int>, Mutex> v(input);
  std::vector<int> next = {4, 5, 6};
  auto prev = v.exchange(std::move(next));
  EXPECT_EQ((std::vector<int>{{1, 2, 3}}), prev);
  EXPECT_EQ((std::vector<int>{{4, 5, 6}}), v.copy());
}
} // namespace sync_tests
} // namespace folly