/*
 * Copyright 2015-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <folly/SharedMutex.h>

#include <stdlib.h>
#include <thread>
#include <vector>

#include <boost/optional.hpp>
#include <boost/thread/shared_mutex.hpp>

#include <folly/Benchmark.h>
#include <folly/MPMCQueue.h>
#include <folly/portability/GFlags.h>
#include <folly/portability/GTest.h>
#include <folly/synchronization/RWSpinLock.h>
#include <folly/test/DeterministicSchedule.h>
#include <folly/test/TestUtils.h>

using namespace folly;
using namespace folly::test;
using namespace std;
using namespace std::chrono;

typedef DeterministicSchedule DSched;
typedef SharedMutexImpl<true, void, DeterministicAtomic, true>
    DSharedMutexReadPriority;
typedef SharedMutexImpl<false, void, DeterministicAtomic, true>
    DSharedMutexWritePriority;

template <typename Lock>
void runBasicTest() {
  Lock lock;
  SharedMutexToken token1;
  SharedMutexToken token2;
  SharedMutexToken token3;

  EXPECT_TRUE(lock.try_lock());
  EXPECT_FALSE(lock.try_lock());
  EXPECT_FALSE(lock.try_lock_shared(token1));
  lock.unlock();

  EXPECT_TRUE(lock.try_lock_shared(token1));
  EXPECT_FALSE(lock.try_lock());
  EXPECT_TRUE(lock.try_lock_shared(token2));
  lock.lock_shared(token3);
  lock.unlock_shared(token3);
  lock.unlock_shared(token2);
  lock.unlock_shared(token1);

  lock.lock();
  lock.unlock();

  lock.lock_shared(token1);
  lock.lock_shared(token2);
  lock.unlock_shared(token1);
  lock.unlock_shared(token2);

  lock.lock();
  lock.unlock_and_lock_shared(token1);
  lock.lock_shared(token2);
  lock.unlock_shared(token2);
  lock.unlock_shared(token1);
}

TEST(SharedMutex, basic) {
  runBasicTest<SharedMutexReadPriority>();
  runBasicTest<SharedMutexWritePriority>();
  runBasicTest<SharedMutexSuppressTSAN>();
}

template <typename Lock>
void runBasicHoldersTest() {
  Lock lock;
  SharedMutexToken token;

  {
    // create an exclusive write lock via holder
    typename Lock::WriteHolder holder(lock);
    EXPECT_FALSE(lock.try_lock());
    EXPECT_FALSE(lock.try_lock_shared(token));

    // move ownership to another write holder via move constructor
    typename Lock::WriteHolder holder2(std::move(holder));
    EXPECT_FALSE(lock.try_lock());
    EXPECT_FALSE(lock.try_lock_shared(token));

    // move ownership to another write holder via assign operator
    typename Lock::WriteHolder holder3(nullptr);
    holder3 = std::move(holder2);
    EXPECT_FALSE(lock.try_lock());
    EXPECT_FALSE(lock.try_lock_shared(token));

    // downgrade from exclusive to upgrade lock via move constructor
    typename Lock::UpgradeHolder holder4(std::move(holder3));

    // ensure we can lock from a shared source
    EXPECT_FALSE(lock.try_lock());
    EXPECT_TRUE(lock.try_lock_shared(token));
    lock.unlock_shared(token);

    // promote from upgrade to exclusive lock via move constructor
    typename Lock::WriteHolder holder5(std::move(holder4));
    EXPECT_FALSE(lock.try_lock());
    EXPECT_FALSE(lock.try_lock_shared(token));

    // downgrade exclusive to shared lock via move constructor
    typename Lock::ReadHolder holder6(std::move(holder5));

    // ensure we can lock from another shared source
    EXPECT_FALSE(lock.try_lock());
    EXPECT_TRUE(lock.try_lock_shared(token));
    lock.unlock_shared(token);
  }

  {
    typename Lock::WriteHolder holder(lock);
    EXPECT_FALSE(lock.try_lock());
  }

  {
    typename Lock::ReadHolder holder(lock);
    typename Lock::ReadHolder holder2(lock);
    typename Lock::UpgradeHolder holder3(lock);
  }

  {
    typename Lock::UpgradeHolder holder(lock);
    typename Lock::ReadHolder holder2(lock);
    typename Lock::ReadHolder holder3(std::move(holder));
  }
}

TEST(SharedMutex, basic_holders) {
  runBasicHoldersTest<SharedMutexReadPriority>();
  runBasicHoldersTest<SharedMutexWritePriority>();
  runBasicHoldersTest<SharedMutexSuppressTSAN>();
}
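
// Added illustration (not part of the original suite): the guarded-data
// pattern the holder tests above exercise. The holders release the lock via
// RAII, so an early return or an exception cannot leak a lock.
// GuardedCounterExample is a hypothetical type introduced here.
namespace {
struct GuardedCounterExample {
  mutable SharedMutex mutex_;
  int value_ = 0;

  void increment() {
    SharedMutex::WriteHolder guard(mutex_); // exclusive while writing
    ++value_;
  }

  int read() const {
    SharedMutex::ReadHolder guard(mutex_); // shared while reading
    return value_;
  }
};
} // namespace

TEST(SharedMutex, holder_raii_example) {
  GuardedCounterExample counter;
  counter.increment();
  EXPECT_EQ(1, counter.read());
}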
template <typename Lock>
void runManyReadLocksTestWithTokens() {
  Lock lock;

  vector<SharedMutexToken> tokens;
  for (int i = 0; i < 1000; ++i) {
    tokens.emplace_back();
    EXPECT_TRUE(lock.try_lock_shared(tokens.back()));
  }
  for (auto& token : tokens) {
    lock.unlock_shared(token);
  }

  EXPECT_TRUE(lock.try_lock());
  lock.unlock();
}

TEST(SharedMutex, many_read_locks_with_tokens) {
  // This test fails in an assertion in the TSAN library because there are too
  // many mutexes
  SKIP_IF(folly::kIsSanitizeThread);
  runManyReadLocksTestWithTokens<SharedMutexReadPriority>();
  runManyReadLocksTestWithTokens<SharedMutexWritePriority>();
  runManyReadLocksTestWithTokens<SharedMutexSuppressTSAN>();
}

template <typename Lock>
void runManyReadLocksTestWithoutTokens() {
  Lock lock;

  for (int i = 0; i < 1000; ++i) {
    EXPECT_TRUE(lock.try_lock_shared());
  }
  for (int i = 0; i < 1000; ++i) {
    lock.unlock_shared();
  }

  EXPECT_TRUE(lock.try_lock());
  lock.unlock();
}

TEST(SharedMutex, many_read_locks_without_tokens) {
  // This test fails in an assertion in the TSAN library because there are too
  // many mutexes
  SKIP_IF(folly::kIsSanitizeThread);
  runManyReadLocksTestWithoutTokens<SharedMutexReadPriority>();
  runManyReadLocksTestWithoutTokens<SharedMutexWritePriority>();
  runManyReadLocksTestWithoutTokens<SharedMutexSuppressTSAN>();
}

template <typename Lock>
void runTimeoutInPastTest() {
  Lock lock;

  EXPECT_TRUE(lock.try_lock_for(milliseconds(0)));
  lock.unlock();
  EXPECT_TRUE(lock.try_lock_for(milliseconds(-1)));
  lock.unlock();
  EXPECT_TRUE(lock.try_lock_shared_for(milliseconds(0)));
  lock.unlock_shared();
  EXPECT_TRUE(lock.try_lock_shared_for(milliseconds(-1)));
  lock.unlock_shared();
  EXPECT_TRUE(lock.try_lock_until(system_clock::now() - milliseconds(1)));
  lock.unlock();
  EXPECT_TRUE(
      lock.try_lock_shared_until(system_clock::now() - milliseconds(1)));
  lock.unlock_shared();
  EXPECT_TRUE(lock.try_lock_until(steady_clock::now() - milliseconds(1)));
  lock.unlock();
  EXPECT_TRUE(
      lock.try_lock_shared_until(steady_clock::now() - milliseconds(1)));
  lock.unlock_shared();
}

TEST(SharedMutex, timeout_in_past) {
  runTimeoutInPastTest<SharedMutexReadPriority>();
  runTimeoutInPastTest<SharedMutexWritePriority>();
  runTimeoutInPastTest<SharedMutexSuppressTSAN>();
}

template <class Func>
bool funcHasDuration(milliseconds expectedDuration, Func func) {
  // elapsed time should eventually fall within expectedDuration +- 25%
  for (int tries = 0; tries < 100; ++tries) {
    auto start = steady_clock::now();
    func();
    auto elapsed = steady_clock::now() - start;
    if (elapsed > expectedDuration - expectedDuration / 4 &&
        elapsed < expectedDuration + expectedDuration / 4) {
      return true;
    }
  }
  return false;
}
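
// Added illustration: the retry loop above exists because a single timing
// sample is noisy; retrying until one sample lands in the +-25% window is
// what makes the timeout tests below robust. A plain sleep stands in for a
// lock wait here.
TEST(SharedMutex, func_has_duration_example) {
  EXPECT_TRUE(funcHasDuration(
      milliseconds(10), [] { this_thread::sleep_for(milliseconds(10)); }));
}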
template <typename Lock>
void runFailingTryTimeoutTest() {
  Lock lock;
  lock.lock();
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    EXPECT_FALSE(lock.try_lock_for(milliseconds(10)));
  }));
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    typename Lock::Token token;
    EXPECT_FALSE(lock.try_lock_shared_for(milliseconds(10), token));
  }));
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    EXPECT_FALSE(lock.try_lock_upgrade_for(milliseconds(10)));
  }));
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    EXPECT_FALSE(lock.try_lock_until(steady_clock::now() + milliseconds(10)));
  }));
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    typename Lock::Token token;
    EXPECT_FALSE(lock.try_lock_shared_until(
        steady_clock::now() + milliseconds(10), token));
  }));
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    EXPECT_FALSE(
        lock.try_lock_upgrade_until(steady_clock::now() + milliseconds(10)));
  }));
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    EXPECT_FALSE(lock.try_lock_until(system_clock::now() + milliseconds(10)));
  }));
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    typename Lock::Token token;
    EXPECT_FALSE(lock.try_lock_shared_until(
        system_clock::now() + milliseconds(10), token));
  }));
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    EXPECT_FALSE(
        lock.try_lock_upgrade_until(system_clock::now() + milliseconds(10)));
  }));
  lock.unlock();

  lock.lock_shared();
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    EXPECT_FALSE(lock.try_lock_for(milliseconds(10)));
  }));
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    EXPECT_FALSE(lock.try_lock_until(steady_clock::now() + milliseconds(10)));
  }));
  EXPECT_TRUE(funcHasDuration(milliseconds(10), [&] {
    EXPECT_FALSE(lock.try_lock_until(system_clock::now() + milliseconds(10)));
  }));
  lock.unlock_shared();

  lock.lock();
  for (int p = 0; p < 8; ++p) {
    EXPECT_FALSE(lock.try_lock_for(nanoseconds(1 << p)));
  }
  lock.unlock();

  for (int p = 0; p < 8; ++p) {
    typename Lock::ReadHolder holder1(lock);
    typename Lock::ReadHolder holder2(lock);
    typename Lock::ReadHolder holder3(lock);
    EXPECT_FALSE(lock.try_lock_for(nanoseconds(1 << p)));
  }
}

TEST(SharedMutex, failing_try_timeout) {
  runFailingTryTimeoutTest<SharedMutexReadPriority>();
  runFailingTryTimeoutTest<SharedMutexWritePriority>();
  runFailingTryTimeoutTest<SharedMutexSuppressTSAN>();
}

template <typename Lock>
void runBasicUpgradeTest() {
  Lock lock;
  typename Lock::Token token1;
  typename Lock::Token token2;

  lock.lock_upgrade();
  EXPECT_FALSE(lock.try_lock());
  EXPECT_TRUE(lock.try_lock_shared(token1));
  lock.unlock_shared(token1);
  lock.unlock_upgrade();

  lock.lock_upgrade();
  lock.unlock_upgrade_and_lock();
  EXPECT_FALSE(lock.try_lock_shared(token1));
  lock.unlock();

  lock.lock_upgrade();
  lock.unlock_upgrade_and_lock_shared(token1);
  lock.lock_upgrade();
  lock.unlock_upgrade_and_lock_shared(token2);
  lock.unlock_shared(token1);
  lock.unlock_shared(token2);

  lock.lock();
  lock.unlock_and_lock_upgrade();
  EXPECT_TRUE(lock.try_lock_shared(token1));
  lock.unlock_upgrade();
  lock.unlock_shared(token1);
}

TEST(SharedMutex, basic_upgrade_tests) {
  runBasicUpgradeTest<SharedMutexReadPriority>();
  runBasicUpgradeTest<SharedMutexWritePriority>();
  runBasicUpgradeTest<SharedMutexSuppressTSAN>();
}
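
// Added illustration: the canonical use of the upgrade state exercised above
// is check-then-maybe-modify, where the check runs concurrently with readers
// and only the (presumably rare) modification excludes them. Single-threaded
// sketch, not a test of contention.
TEST(SharedMutex, upgrade_pattern_example) {
  SharedMutex lock;
  int value = 0;
  lock.lock_upgrade(); // compatible with readers, exclusive among writers
  if (value == 0) {
    lock.unlock_upgrade_and_lock(); // promote to fully exclusive
    value = 1;
    lock.unlock();
  } else {
    lock.unlock_upgrade();
  }
  EXPECT_EQ(1, value);
}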
TEST(SharedMutex, read_has_prio) {
  SharedMutexReadPriority lock;
  SharedMutexToken token1;
  SharedMutexToken token2;
  lock.lock_shared(token1);
  bool exclusiveAcquired = false;
  auto writer = thread([&] {
    lock.lock();
    exclusiveAcquired = true;
    lock.unlock();
  });

  // lock() can't complete until we unlock token1, but it should stake
  // its claim with regards to other exclusive or upgrade locks. We can
  // use try_lock_upgrade to poll for that eventuality.
  while (lock.try_lock_upgrade()) {
    lock.unlock_upgrade();
    this_thread::yield();
  }
  EXPECT_FALSE(exclusiveAcquired);

  // Even though lock() is stuck we should be able to get token2
  EXPECT_TRUE(lock.try_lock_shared(token2));
  lock.unlock_shared(token1);
  lock.unlock_shared(token2);
  writer.join();
  EXPECT_TRUE(exclusiveAcquired);
}

TEST(SharedMutex, write_has_prio) {
  SharedMutexWritePriority lock;
  SharedMutexToken token1;
  SharedMutexToken token2;
  lock.lock_shared(token1);
  auto writer = thread([&] {
    lock.lock();
    lock.unlock();
  });

  // eventually lock() should block readers
  while (lock.try_lock_shared(token2)) {
    lock.unlock_shared(token2);
    this_thread::yield();
  }
  lock.unlock_shared(token1);
  writer.join();
}

struct TokenLocker {
  SharedMutexToken token;

  template <typename T>
  void lock(T* lockable) {
    lockable->lock();
  }

  template <typename T>
  void unlock(T* lockable) {
    lockable->unlock();
  }

  template <typename T>
  void lock_shared(T* lockable) {
    lockable->lock_shared(token);
  }

  template <typename T>
  void unlock_shared(T* lockable) {
    lockable->unlock_shared(token);
  }
};

struct Locker {
  template <typename T>
  void lock(T* lockable) {
    lockable->lock();
  }

  template <typename T>
  void unlock(T* lockable) {
    lockable->unlock();
  }

  template <typename T>
  void lock_shared(T* lockable) {
    lockable->lock_shared();
  }

  template <typename T>
  void unlock_shared(T* lockable) {
    lockable->unlock_shared();
  }
};

struct EnterLocker {
  template <typename T>
  void lock(T* lockable) {
    lockable->lock(0);
  }

  template <typename T>
  void unlock(T* lockable) {
    lockable->unlock();
  }

  template <typename T>
  void lock_shared(T* lockable) {
    lockable->enter(0);
  }

  template <typename T>
  void unlock_shared(T* lockable) {
    lockable->leave();
  }
};

struct PosixRWLock {
  pthread_rwlock_t lock_;

  PosixRWLock() {
    pthread_rwlock_init(&lock_, nullptr);
  }

  ~PosixRWLock() {
    pthread_rwlock_destroy(&lock_);
  }

  void lock() {
    pthread_rwlock_wrlock(&lock_);
  }

  void unlock() {
    pthread_rwlock_unlock(&lock_);
  }

  void lock_shared() {
    pthread_rwlock_rdlock(&lock_);
  }

  void unlock_shared() {
    pthread_rwlock_unlock(&lock_);
  }
};

struct PosixMutex {
  pthread_mutex_t lock_;

  PosixMutex() {
    pthread_mutex_init(&lock_, nullptr);
  }

  ~PosixMutex() {
    pthread_mutex_destroy(&lock_);
  }

  void lock() {
    pthread_mutex_lock(&lock_);
  }

  void unlock() {
    pthread_mutex_unlock(&lock_);
  }

  void lock_shared() {
    pthread_mutex_lock(&lock_);
  }

  void unlock_shared() {
    pthread_mutex_unlock(&lock_);
  }
};
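
// Added illustration: the benchmark drivers below are templated on
// (Atom, Lock, Locker), so a single harness can drive folly, boost, and
// pthread locks through this uniform adapter interface. Exercising
// TokenLocker directly shows the shape of that interface; the adapter keeps
// the token between lock_shared and unlock_shared.
TEST(SharedMutex, locker_adapter_example) {
  SharedMutex mutex;
  TokenLocker locker;
  locker.lock_shared(&mutex); // token is stored inside the adapter
  EXPECT_FALSE(mutex.try_lock());
  locker.unlock_shared(&mutex); // unlocks using the stored token
  locker.lock(&mutex);
  EXPECT_FALSE(mutex.try_lock());
  locker.unlock(&mutex);
}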
template <template <typename> class Atom, typename Lock, typename Locker>
static void
runContendedReaders(size_t numOps, size_t numThreads, bool useSeparateLocks) {
  char padding1[64];
  (void)padding1;
  Lock globalLock;
  int valueProtectedByLock = 10;
  char padding2[64];
  (void)padding2;
  Atom<bool> go(false);
  Atom<bool>* goPtr = &go; // workaround for clang bug
  vector<thread> threads(numThreads);

  BENCHMARK_SUSPEND {
    for (size_t t = 0; t < numThreads; ++t) {
      threads[t] = DSched::thread([&, t, numThreads] {
        Lock privateLock;
        Lock* lock = useSeparateLocks ? &privateLock : &globalLock;
        Locker locker;
        while (!goPtr->load()) {
          this_thread::yield();
        }
        for (size_t op = t; op < numOps; op += numThreads) {
          locker.lock_shared(lock);
          // note: folly::doNotOptimizeAway reads and writes to its arg,
          // so the following two lines are very different than a call
          // to folly::doNotOptimizeAway(valueProtectedByLock);
          auto copy = valueProtectedByLock;
          folly::doNotOptimizeAway(copy);
          locker.unlock_shared(lock);
        }
      });
    }
  }
  go.store(true);
  for (auto& thr : threads) {
    DSched::join(thr);
  }
}

static void
folly_rwspin_reads(uint32_t numOps, size_t numThreads, bool useSeparateLocks) {
  runContendedReaders<atomic, RWSpinLock, Locker>(
      numOps, numThreads, useSeparateLocks);
}

static void
shmtx_wr_pri_reads(uint32_t numOps, size_t numThreads, bool useSeparateLocks) {
  runContendedReaders<atomic, SharedMutexWritePriority, TokenLocker>(
      numOps, numThreads, useSeparateLocks);
}

static void
shmtx_w_bare_reads(uint32_t numOps, size_t numThreads, bool useSeparateLocks) {
  runContendedReaders<atomic, SharedMutexWritePriority, Locker>(
      numOps, numThreads, useSeparateLocks);
}

static void
shmtx_rd_pri_reads(uint32_t numOps, size_t numThreads, bool useSeparateLocks) {
  runContendedReaders<atomic, SharedMutexReadPriority, TokenLocker>(
      numOps, numThreads, useSeparateLocks);
}

static void
shmtx_r_bare_reads(uint32_t numOps, size_t numThreads, bool useSeparateLocks) {
  runContendedReaders<atomic, SharedMutexReadPriority, Locker>(
      numOps, numThreads, useSeparateLocks);
}

static void
folly_ticket_reads(uint32_t numOps, size_t numThreads, bool useSeparateLocks) {
  runContendedReaders<atomic, RWTicketSpinLock64, Locker>(
      numOps, numThreads, useSeparateLocks);
}

static void
boost_shared_reads(uint32_t numOps, size_t numThreads, bool useSeparateLocks) {
  runContendedReaders<atomic, boost::shared_mutex, Locker>(
      numOps, numThreads, useSeparateLocks);
}

static void
pthrd_rwlock_reads(uint32_t numOps, size_t numThreads, bool useSeparateLocks) {
  runContendedReaders<atomic, PosixRWLock, Locker>(
      numOps, numThreads, useSeparateLocks);
}

template <template <typename> class Atom, typename Lock, typename Locker>
static void runMixed(
    size_t numOps,
    size_t numThreads,
    double writeFraction,
    bool useSeparateLocks) {
  char padding1[64];
  (void)padding1;
  Lock globalLock;
  int valueProtectedByLock = 0;
  char padding2[64];
  (void)padding2;
  Atom<bool> go(false);
  Atom<bool>* goPtr = &go; // workaround for clang bug
  vector<thread> threads(numThreads);

  BENCHMARK_SUSPEND {
    for (size_t t = 0; t < numThreads; ++t) {
      threads[t] = DSched::thread([&, t, numThreads] {
        struct drand48_data buffer;
        srand48_r(t, &buffer);
        long writeThreshold = writeFraction * 0x7fffffff;
        Lock privateLock;
        Lock* lock = useSeparateLocks ? &privateLock : &globalLock;
        Locker locker;
        while (!goPtr->load()) {
          this_thread::yield();
        }
        for (size_t op = t; op < numOps; op += numThreads) {
          long randVal;
          lrand48_r(&buffer, &randVal);
          bool writeOp = randVal < writeThreshold;
          if (writeOp) {
            locker.lock(lock);
            if (!useSeparateLocks) {
              ++valueProtectedByLock;
            }
            locker.unlock(lock);
          } else {
            locker.lock_shared(lock);
            auto v = valueProtectedByLock;
            folly::doNotOptimizeAway(v);
            locker.unlock_shared(lock);
          }
        }
      });
    }
  }
  go.store(true);
  for (auto& thr : threads) {
    DSched::join(thr);
  }
}

static void folly_rwspin(
    size_t numOps,
    size_t numThreads,
    double writeFraction,
    bool useSeparateLocks) {
  runMixed<atomic, RWSpinLock, Locker>(
      numOps, numThreads, writeFraction, useSeparateLocks);
}

static void shmtx_wr_pri(
    uint32_t numOps,
    size_t numThreads,
    double writeFraction,
    bool useSeparateLocks) {
  runMixed<atomic, SharedMutexWritePriority, TokenLocker>(
      numOps, numThreads, writeFraction, useSeparateLocks);
}

static void shmtx_w_bare(
    uint32_t numOps,
    size_t numThreads,
    double writeFraction,
    bool useSeparateLocks) {
  runMixed<atomic, SharedMutexWritePriority, Locker>(
      numOps, numThreads, writeFraction, useSeparateLocks);
}

static void shmtx_rd_pri(
    uint32_t numOps,
    size_t numThreads,
    double writeFraction,
    bool useSeparateLocks) {
  runMixed<atomic, SharedMutexReadPriority, TokenLocker>(
      numOps, numThreads, writeFraction, useSeparateLocks);
}

static void shmtx_r_bare(
    uint32_t numOps,
    size_t numThreads,
    double writeFraction,
    bool useSeparateLocks) {
  runMixed<atomic, SharedMutexReadPriority, Locker>(
      numOps, numThreads, writeFraction, useSeparateLocks);
}

static void folly_ticket(
    size_t numOps,
    size_t numThreads,
    double writeFraction,
    bool useSeparateLocks) {
  runMixed<atomic, RWTicketSpinLock64, Locker>(
      numOps, numThreads, writeFraction, useSeparateLocks);
}

static void boost_shared(
    size_t numOps,
    size_t numThreads,
    double writeFraction,
    bool useSeparateLocks) {
  runMixed<atomic, boost::shared_mutex, Locker>(
      numOps, numThreads, writeFraction, useSeparateLocks);
}

static void pthrd_rwlock(
    size_t numOps,
    size_t numThreads,
    double writeFraction,
    bool useSeparateLocks) {
  runMixed<atomic, PosixRWLock, Locker>(
      numOps, numThreads, writeFraction, useSeparateLocks);
}

static void pthrd_mutex_(
    size_t numOps,
    size_t numThreads,
    double writeFraction,
    bool useSeparateLocks) {
  runMixed<atomic, PosixMutex, Locker>(
      numOps, numThreads, writeFraction, useSeparateLocks);
}

template <typename Lock, template <typename> class Atom>
static void runAllAndValidate(size_t numOps, size_t numThreads) {
  Lock globalLock;
  Atom<int> globalExclusiveCount(0);
  Atom<int> globalUpgradeCount(0);
  Atom<int> globalSharedCount(0);

  Atom<bool> go(false);

  // clang crashes on access to Atom<> captured by ref in closure
  Atom<int>* globalExclusiveCountPtr = &globalExclusiveCount;
  Atom<int>* globalUpgradeCountPtr = &globalUpgradeCount;
  Atom<int>* globalSharedCountPtr = &globalSharedCount;
  Atom<bool>* goPtr = &go;

  vector<thread> threads(numThreads);

  BENCHMARK_SUSPEND {
    for (size_t t = 0; t < numThreads; ++t) {
      threads[t] = DSched::thread([&, t, numThreads] {
        struct drand48_data buffer;
        srand48_r(t, &buffer);

        bool exclusive = false;
        bool upgrade = false;
        bool shared = false;
        bool ourGlobalTokenUsed = false;
        SharedMutexToken ourGlobalToken;

        Lock privateLock;
        vector<SharedMutexToken> privateTokens;

        while (!goPtr->load()) {
          this_thread::yield();
        }
        for (size_t op = t; op < numOps; op += numThreads) {
          // randVal in [0,1000)
          long randVal;
          lrand48_r(&buffer, &randVal);
          randVal = (long)((randVal * (uint64_t)1000) / 0x7fffffff);

          // make as many assertions as possible about the global state
          if (exclusive) {
            EXPECT_EQ(1, globalExclusiveCountPtr->load(memory_order_acquire));
            EXPECT_EQ(0, globalUpgradeCountPtr->load(memory_order_acquire));
            EXPECT_EQ(0, globalSharedCountPtr->load(memory_order_acquire));
          }
          if (upgrade) {
            EXPECT_EQ(0, globalExclusiveCountPtr->load(memory_order_acquire));
            EXPECT_EQ(1, globalUpgradeCountPtr->load(memory_order_acquire));
          }
          if (shared) {
            EXPECT_EQ(0, globalExclusiveCountPtr->load(memory_order_acquire));
            EXPECT_TRUE(globalSharedCountPtr->load(memory_order_acquire) > 0);
          } else {
            EXPECT_FALSE(ourGlobalTokenUsed);
          }

          // independent 20% chance we do something to the private lock
          if (randVal < 200) {
            // it's okay to take multiple private shared locks because
            // we never take an exclusive lock, so reader versus writer
            // priority doesn't cause deadlocks
            if (randVal < 100 && privateTokens.size() > 0) {
              auto i = randVal % privateTokens.size();
              privateLock.unlock_shared(privateTokens[i]);
              privateTokens.erase(privateTokens.begin() + i);
            } else {
              SharedMutexToken token;
              privateLock.lock_shared(token);
              privateTokens.push_back(token);
            }
            continue;
          }

          // if we've got a lock, the only thing we can do is release it
          // or transform it into a different kind of lock
          if (exclusive) {
            exclusive = false;
            --*globalExclusiveCountPtr;
            if (randVal < 500) {
              globalLock.unlock();
            } else if (randVal < 700) {
              globalLock.unlock_and_lock_shared();
              ++*globalSharedCountPtr;
              shared = true;
            } else if (randVal < 900) {
              globalLock.unlock_and_lock_shared(ourGlobalToken);
              ++*globalSharedCountPtr;
              shared = true;
              ourGlobalTokenUsed = true;
            } else {
              globalLock.unlock_and_lock_upgrade();
              ++*globalUpgradeCountPtr;
              upgrade = true;
            }
          } else if (upgrade) {
            upgrade = false;
            --*globalUpgradeCountPtr;
            if (randVal < 500) {
              globalLock.unlock_upgrade();
            } else if (randVal < 700) {
              globalLock.unlock_upgrade_and_lock_shared();
              ++*globalSharedCountPtr;
              shared = true;
            } else if (randVal < 900) {
              globalLock.unlock_upgrade_and_lock_shared(ourGlobalToken);
              ++*globalSharedCountPtr;
              shared = true;
              ourGlobalTokenUsed = true;
            } else {
              globalLock.unlock_upgrade_and_lock();
              ++*globalExclusiveCountPtr;
              exclusive = true;
            }
          } else if (shared) {
            shared = false;
            --*globalSharedCountPtr;
            if (ourGlobalTokenUsed) {
              globalLock.unlock_shared(ourGlobalToken);
              ourGlobalTokenUsed = false;
            } else {
              globalLock.unlock_shared();
            }
          } else if (randVal < 400) {
            // 40% chance of shared lock with token, 5 ways to get it
            // delta t goes from -1 millis to 7 millis
            auto dt = microseconds(10 * (randVal - 100));
            if (randVal < 400) {
              globalLock.lock_shared(ourGlobalToken);
              shared = true;
            } else if (randVal < 500) {
              shared = globalLock.try_lock_shared(ourGlobalToken);
            } else if (randVal < 600) {
              shared = globalLock.try_lock_shared_for(dt, ourGlobalToken);
            } else if (randVal < 800) {
              shared = globalLock.try_lock_shared_until(
                  system_clock::now() + dt, ourGlobalToken);
            }
            if (shared) {
              ourGlobalTokenUsed = true;
              ++*globalSharedCountPtr;
            }
          } else if (randVal < 800) {
            // 40% chance of shared lock without token
            auto dt = microseconds(10 * (randVal - 100));
            if (randVal < 400) {
              globalLock.lock_shared();
              shared = true;
            } else if (randVal < 500) {
              shared = globalLock.try_lock_shared();
            } else if (randVal < 600) {
              shared = globalLock.try_lock_shared_for(dt);
            } else if (randVal < 800) {
              shared =
                  globalLock.try_lock_shared_until(system_clock::now() + dt);
            }
            if (shared) {
              ++*globalSharedCountPtr;
            }
          } else if (randVal < 900) {
            // 10% chance of upgrade lock
            globalLock.lock_upgrade();
            upgrade = true;
            ++*globalUpgradeCountPtr;
          } else {
            // 10% chance of exclusive lock, 5 ways to get it
            // delta t goes from -1 millis to 9 millis
            auto dt = microseconds(100 * (randVal - 910));
            if (randVal < 400) {
              globalLock.lock();
              exclusive = true;
            } else if (randVal < 500) {
              exclusive = globalLock.try_lock();
            } else if (randVal < 600) {
              exclusive = globalLock.try_lock_for(dt);
            } else if (randVal < 700) {
              exclusive = globalLock.try_lock_until(steady_clock::now() + dt);
            } else {
              exclusive = globalLock.try_lock_until(system_clock::now() + dt);
            }
            if (exclusive) {
              ++*globalExclusiveCountPtr;
            }
          }
        }

        if (exclusive) {
          --*globalExclusiveCountPtr;
          globalLock.unlock();
        }
        if (upgrade) {
          --*globalUpgradeCountPtr;
          globalLock.unlock_upgrade();
        }
        if (shared) {
          --*globalSharedCountPtr;
          if (ourGlobalTokenUsed) {
            globalLock.unlock_shared(ourGlobalToken);
            ourGlobalTokenUsed = false;
          } else {
            globalLock.unlock_shared();
          }
        }
        for (auto& token : privateTokens) {
          privateLock.unlock_shared(token);
        }
      });
    }
  }
  go.store(true);
  for (auto& thr : threads) {
    DSched::join(thr);
  }
}

TEST(SharedMutex, deterministic_concurrent_readers_of_one_lock_read_prio) {
  for (int pass = 0; pass < 3; ++pass) {
    DSched sched(DSched::uniform(pass));
    runContendedReaders<DeterministicAtomic, DSharedMutexReadPriority, Locker>(
        1000, 3, false);
  }
}

TEST(SharedMutex, deterministic_concurrent_readers_of_one_lock_write_prio) {
  for (int pass = 0; pass < 3; ++pass) {
    DSched sched(DSched::uniform(pass));
    runContendedReaders<DeterministicAtomic, DSharedMutexWritePriority, Locker>(
        1000, 3, false);
  }
}

TEST(SharedMutex, concurrent_readers_of_one_lock_read_prio) {
  for (int pass = 0; pass < 10; ++pass) {
    runContendedReaders<atomic, SharedMutexReadPriority, Locker>(
        100000, 32, false);
  }
}

TEST(SharedMutex, concurrent_readers_of_one_lock_write_prio) {
  for (int pass = 0; pass < 10; ++pass) {
    runContendedReaders<atomic, SharedMutexWritePriority, Locker>(
        100000, 32, false);
  }
}

TEST(SharedMutex, deterministic_readers_of_concurrent_locks_read_prio) {
  for (int pass = 0; pass < 3; ++pass) {
    DSched sched(DSched::uniform(pass));
    runContendedReaders<DeterministicAtomic, DSharedMutexReadPriority, Locker>(
        1000, 3, true);
  }
}

TEST(SharedMutex, deterministic_readers_of_concurrent_locks_write_prio) {
  for (int pass = 0; pass < 3; ++pass) {
    DSched sched(DSched::uniform(pass));
    runContendedReaders<DeterministicAtomic, DSharedMutexWritePriority, Locker>(
        1000, 3, true);
  }
}

TEST(SharedMutex, readers_of_concurrent_locks_read_prio) {
  for (int pass = 0; pass < 10; ++pass) {
    runContendedReaders<atomic, SharedMutexReadPriority, TokenLocker>(
        100000, 32, true);
  }
}

TEST(SharedMutex, readers_of_concurrent_locks_write_prio) {
  for (int pass = 0; pass < 10; ++pass) {
    runContendedReaders<atomic, SharedMutexWritePriority, TokenLocker>(
        100000, 32, true);
  }
}

TEST(SharedMutex, deterministic_mixed_mostly_read_read_prio) {
  for (int pass = 0; pass < 3; ++pass) {
    DSched sched(DSched::uniform(pass));
    runMixed<DeterministicAtomic, DSharedMutexReadPriority, Locker>(
        1000, 3, 0.1, false);
  }
}

TEST(SharedMutex, deterministic_mixed_mostly_read_write_prio) {
  for (int pass = 0; pass < 3; ++pass) {
    DSched sched(DSched::uniform(pass));
    runMixed<DeterministicAtomic, DSharedMutexWritePriority, Locker>(
        1000, 3, 0.1, false);
  }
}

TEST(SharedMutex, mixed_mostly_read_read_prio) {
  for (int pass = 0; pass < 5; ++pass) {
    runMixed<atomic, SharedMutexReadPriority, TokenLocker>(
        10000, 32, 0.1, false);
  }
}

TEST(SharedMutex, mixed_mostly_read_write_prio) {
  for (int pass = 0; pass < 5; ++pass) {
    runMixed<atomic, SharedMutexWritePriority, TokenLocker>(
        10000, 32, 0.1, false);
  }
}

TEST(SharedMutex, deterministic_mixed_mostly_write_read_prio) {
  for (int pass = 0; pass < 1; ++pass) {
    DSched sched(DSched::uniform(pass));
    runMixed<DeterministicAtomic, DSharedMutexReadPriority, TokenLocker>(
        1000, 10, 0.9, false);
  }
}

TEST(SharedMutex, deterministic_mixed_mostly_write_write_prio) {
  for (int pass = 0; pass < 1; ++pass) {
    DSched sched(DSched::uniform(pass));
    runMixed<DeterministicAtomic, DSharedMutexWritePriority, TokenLocker>(
        1000, 10, 0.9, false);
  }
}

TEST(SharedMutex, deterministic_lost_wakeup_write_prio) {
  for (int pass = 0; pass < 10; ++pass) {
    DSched sched(DSched::uniformSubset(pass, 2, 200));
    runMixed<DeterministicAtomic, DSharedMutexWritePriority, TokenLocker>(
        1000, 3, 1.0, false);
  }
}

// In TSAN, tests run a lot slower. To avoid test timeouts, adjust the number
// of repetitions we need for tests.
static std::size_t adjustReps(std::size_t reps) {
  if (folly::kIsSanitizeThread) {
    return reps / 10;
  }
  return reps;
}

TEST(SharedMutex, mixed_mostly_write_read_prio) {
  for (int pass = 0; pass < (folly::kIsSanitizeAddress ? 1 : 5); ++pass) {
    runMixed<atomic, SharedMutexReadPriority, TokenLocker>(
        adjustReps(50000), adjustReps(300), 0.9, false);
  }
}

TEST(SharedMutex, mixed_mostly_write_write_prio) {
  for (int pass = 0; pass < (folly::kIsSanitizeAddress ? 1 : 5); ++pass) {
    runMixed<atomic, SharedMutexWritePriority, TokenLocker>(
        adjustReps(50000), adjustReps(300), 0.9, false);
  }
}

TEST(SharedMutex, deterministic_all_ops_read_prio) {
  for (int pass = 0; pass < 5; ++pass) {
    DSched sched(DSched::uniform(pass));
    runAllAndValidate<DSharedMutexReadPriority, DeterministicAtomic>(1000, 8);
  }
}

TEST(SharedMutex, deterministic_all_ops_write_prio) {
  // This test fails in TSAN because of noisy lock ordering inversions.
  SKIP_IF(folly::kIsSanitizeThread);
  for (int pass = 0; pass < 5; ++pass) {
    DSched sched(DSched::uniform(pass));
    runAllAndValidate<DSharedMutexWritePriority, DeterministicAtomic>(1000, 8);
  }
}

TEST(SharedMutex, all_ops_read_prio) {
  for (int pass = 0; pass < 5; ++pass) {
    runAllAndValidate<SharedMutexReadPriority, atomic>(100000, 32);
  }
}

TEST(SharedMutex, all_ops_write_prio) {
  // This test fails in TSAN because of noisy lock ordering inversions.
  SKIP_IF(folly::kIsSanitizeThread);
  for (int pass = 0; pass < 5; ++pass) {
    runAllAndValidate<SharedMutexWritePriority, atomic>(100000, 32);
  }
}

FOLLY_ASSUME_FBVECTOR_COMPATIBLE(
    boost::optional<boost::optional<SharedMutexToken>>)
// Setup is a set of threads that either grab a shared lock, or exclusive
// and then downgrade it, or upgrade then upgrade and downgrade, then
// enqueue the shared lock to a second set of threads that just performs
// unlocks. Half of the shared locks use tokens, the others don't.
template <typename Lock, template <typename> class Atom>
static void runRemoteUnlock(
    size_t numOps,
    double preWriteFraction,
    double preUpgradeFraction,
    size_t numSendingThreads,
    size_t numReceivingThreads) {
  Lock globalLock;
  MPMCQueue<boost::optional<boost::optional<SharedMutexToken>>, Atom> queue(10);
  auto queuePtr = &queue; // workaround for clang crash
  Atom<bool> go(false);
  auto goPtr = &go; // workaround for clang crash
  Atom<int> pendingSenders(numSendingThreads);
  auto pendingSendersPtr = &pendingSenders; // workaround for clang crash
  vector<thread> threads(numSendingThreads + numReceivingThreads);

  BENCHMARK_SUSPEND {
    for (size_t t = 0; t < threads.size(); ++t) {
      threads[t] = DSched::thread([&, t, numSendingThreads] {
        if (t >= numSendingThreads) {
          // we're a receiver
          typename decltype(queue)::value_type elem;
          while (true) {
            queuePtr->blockingRead(elem);
            if (!elem) {
              // EOF, pass the EOF token
              queuePtr->blockingWrite(std::move(elem));
              break;
            }
            if (*elem) {
              globalLock.unlock_shared(**elem);
            } else {
              globalLock.unlock_shared();
            }
          }
          return;
        }
        // else we're a sender

        struct drand48_data buffer;
        srand48_r(t, &buffer);

        while (!goPtr->load()) {
          this_thread::yield();
        }
        for (size_t op = t; op < numOps; op += numSendingThreads) {
          long unscaledRandVal;
          lrand48_r(&buffer, &unscaledRandVal);

          // randVal in [0,1]
          double randVal = ((double)unscaledRandVal) / 0x7fffffff;

          // extract a bit and rescale
          bool useToken = randVal >= 0.5;
          randVal = (randVal - (useToken ? 0.5 : 0.0)) * 2;

          boost::optional<SharedMutexToken> maybeToken;
          if (useToken) {
            SharedMutexToken token;
            if (randVal < preWriteFraction) {
              globalLock.lock();
              globalLock.unlock_and_lock_shared(token);
            } else if (randVal < preWriteFraction + preUpgradeFraction / 2) {
              globalLock.lock_upgrade();
              globalLock.unlock_upgrade_and_lock_shared(token);
            } else if (randVal < preWriteFraction + preUpgradeFraction) {
              globalLock.lock_upgrade();
              globalLock.unlock_upgrade_and_lock();
              globalLock.unlock_and_lock_shared(token);
            } else {
              globalLock.lock_shared(token);
            }
            maybeToken = token;
          } else {
            if (randVal < preWriteFraction) {
              globalLock.lock();
              globalLock.unlock_and_lock_shared();
            } else if (randVal < preWriteFraction + preUpgradeFraction / 2) {
              globalLock.lock_upgrade();
              globalLock.unlock_upgrade_and_lock_shared();
            } else if (randVal < preWriteFraction + preUpgradeFraction) {
              globalLock.lock_upgrade();
              globalLock.unlock_upgrade_and_lock();
              globalLock.unlock_and_lock_shared();
            } else {
              globalLock.lock_shared();
            }
          }

          // blockingWrite is emplace-like, so this automatically adds
          // another level of wrapping
          queuePtr->blockingWrite(maybeToken);
        }
        if (--*pendingSendersPtr == 0) {
          queuePtr->blockingWrite(boost::none);
        }
      });
    }
  }
  go.store(true);
  for (auto& thr : threads) {
    DSched::join(thr);
  }
}

TEST(SharedMutex, deterministic_remote_write_prio) {
  // This test fails in an assertion in the TSAN library because there are too
  // many mutexes
  SKIP_IF(folly::kIsSanitizeThread);
  for (int pass = 0; pass < 1; ++pass) {
    DSched sched(DSched::uniform(pass));
    runRemoteUnlock<DSharedMutexWritePriority, DeterministicAtomic>(
        500, 0.1, 0.1, 5, 5);
  }
}

TEST(SharedMutex, deterministic_remote_read_prio) {
  for (int pass = 0; pass < 1; ++pass) {
    DSched sched(DSched::uniform(pass));
    runRemoteUnlock<DSharedMutexReadPriority, DeterministicAtomic>(
        500, 0.1, 0.1, 5, 5);
  }
}

TEST(SharedMutex, remote_write_prio) {
  // This test fails in an assertion in the TSAN library because there are too
  // many mutexes
  SKIP_IF(folly::kIsSanitizeThread);
  for (int pass = 0; pass < 10; ++pass) {
    runRemoteUnlock<SharedMutexWritePriority, atomic>(100000, 0.1, 0.1, 5, 5);
  }
}

TEST(SharedMutex, remote_read_prio) {
  // This test fails in an assertion in the TSAN library because there are too
  // many mutexes
  SKIP_IF(folly::kIsSanitizeThread);
  for (int pass = 0; pass < (folly::kIsSanitizeAddress ? 1 : 100); ++pass) {
    runRemoteUnlock<SharedMutexReadPriority, atomic>(100000, 0.1, 0.1, 5, 5);
  }
}

static void burn(size_t n) {
  for (size_t i = 0; i < n; ++i) {
    folly::doNotOptimizeAway(i);
  }
}
// Two threads and three locks, arranged so that they have to proceed
// in turn with reader/writer conflict
template <typename Lock, template <typename> class Atom = atomic>
static void runPingPong(size_t numRounds, size_t burnCount) {
  char padding1[56];
  (void)padding1;
  pair<Lock, char[56]> locks[3];
  char padding2[56];
  (void)padding2;

  Atom<int> avail(0);
  auto availPtr = &avail; // workaround for clang crash
  Atom<bool> go(false);
  auto goPtr = &go; // workaround for clang crash
  vector<thread> threads(2);

  locks[0].first.lock();
  locks[1].first.lock();
  locks[2].first.lock_shared();

  BENCHMARK_SUSPEND {
    threads[0] = DSched::thread([&] {
      ++*availPtr;
      while (!goPtr->load()) {
        this_thread::yield();
      }
      for (size_t i = 0; i < numRounds; ++i) {
        locks[i % 3].first.unlock();
        locks[(i + 2) % 3].first.lock();
        burn(burnCount);
      }
    });
    threads[1] = DSched::thread([&] {
      ++*availPtr;
      while (!goPtr->load()) {
        this_thread::yield();
      }
      for (size_t i = 0; i < numRounds; ++i) {
        locks[i % 3].first.lock_shared();
        burn(burnCount);
        locks[(i + 2) % 3].first.unlock_shared();
      }
    });
    while (avail.load() < 2) {
      this_thread::yield();
    }
  }
  go.store(true);
  for (auto& thr : threads) {
    DSched::join(thr);
  }
  locks[numRounds % 3].first.unlock();
  locks[(numRounds + 1) % 3].first.unlock();
  locks[(numRounds + 2) % 3].first.unlock_shared();
}

static void folly_rwspin_ping_pong(size_t n, size_t scale, size_t burnCount) {
  runPingPong<RWSpinLock>(n / scale, burnCount);
}

static void shmtx_w_bare_ping_pong(size_t n, size_t scale, size_t burnCount) {
  runPingPong<SharedMutexWritePriority>(n / scale, burnCount);
}

static void shmtx_r_bare_ping_pong(size_t n, size_t scale, size_t burnCount) {
  runPingPong<SharedMutexReadPriority>(n / scale, burnCount);
}

static void folly_ticket_ping_pong(size_t n, size_t scale, size_t burnCount) {
  runPingPong<RWTicketSpinLock64>(n / scale, burnCount);
}

static void boost_shared_ping_pong(size_t n, size_t scale, size_t burnCount) {
  runPingPong<boost::shared_mutex>(n / scale, burnCount);
}

static void pthrd_rwlock_ping_pong(size_t n, size_t scale, size_t burnCount) {
  runPingPong<PosixRWLock>(n / scale, burnCount);
}

TEST(SharedMutex, deterministic_ping_pong_write_prio) {
  // This test fails in TSAN because some mutexes are lock_shared() in one
  // thread and unlock_shared() in a different thread.
  SKIP_IF(folly::kIsSanitizeThread);
  for (int pass = 0; pass < 1; ++pass) {
    DSched sched(DSched::uniform(pass));
    runPingPong<DSharedMutexWritePriority, DeterministicAtomic>(500, 0);
  }
}

TEST(SharedMutex, deterministic_ping_pong_read_prio) {
  for (int pass = 0; pass < 1; ++pass) {
    DSched sched(DSched::uniform(pass));
    runPingPong<DSharedMutexReadPriority, DeterministicAtomic>(500, 0);
  }
}

TEST(SharedMutex, ping_pong_write_prio) {
  // This test fails in TSAN because some mutexes are lock_shared() in one
  // thread and unlock_shared() in a different thread.
  SKIP_IF(folly::kIsSanitizeThread);
  for (int pass = 0; pass < 1; ++pass) {
    runPingPong<SharedMutexWritePriority, atomic>(50000, 0);
  }
}

TEST(SharedMutex, ping_pong_read_prio) {
  for (int pass = 0; pass < 1; ++pass) {
    runPingPong<SharedMutexReadPriority, atomic>(50000, 0);
  }
}

// This is here so you can tell how much of the runtime reported by the
// more complex harnesses is due to the harness, although due to the
// magic of compiler optimization it may also be slower
BENCHMARK(single_thread_lock_shared_unlock_shared, iters) {
  SharedMutex lock;
  for (size_t n = 0; n < iters; ++n) {
    SharedMutex::Token token;
    lock.lock_shared(token);
    folly::doNotOptimizeAway(0);
    lock.unlock_shared(token);
  }
}

BENCHMARK(single_thread_lock_unlock, iters) {
  SharedMutex lock;
  for (size_t n = 0; n < iters; ++n) {
    lock.lock();
    folly::doNotOptimizeAway(0);
    lock.unlock();
  }
}

#define BENCH_BASE(...) FB_VA_GLUE(BENCHMARK_NAMED_PARAM, (__VA_ARGS__))
#define BENCH_REL(...) FB_VA_GLUE(BENCHMARK_RELATIVE_NAMED_PARAM, (__VA_ARGS__))
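
// For reference (based on the folly::Benchmark macros, added here as a
// reading aid): a line such as
//   BENCH_BASE(folly_rwspin_reads, 1thread, 1, false)
// registers a benchmark named "folly_rwspin_reads(1thread)" whose body calls
// folly_rwspin_reads(iters, 1, false), with iters chosen by the framework.
// BENCH_REL entries report throughput relative to the preceding BENCH_BASE.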
  1260. // 100% reads. Best-case scenario for deferred locks. Lock is colocated
  1261. // with read data, so inline lock takes cache miss every time but deferred
  1262. // lock has only cache hits and local access.
BENCHMARK_DRAW_LINE();
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin_reads, 1thread, 1, false)
BENCH_REL(shmtx_wr_pri_reads, 1thread, 1, false)
BENCH_REL(shmtx_w_bare_reads, 1thread, 1, false)
BENCH_REL(shmtx_rd_pri_reads, 1thread, 1, false)
BENCH_REL(shmtx_r_bare_reads, 1thread, 1, false)
BENCH_REL(folly_ticket_reads, 1thread, 1, false)
BENCH_REL(boost_shared_reads, 1thread, 1, false)
BENCH_REL(pthrd_rwlock_reads, 1thread, 1, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin_reads, 2thread, 2, false)
BENCH_REL(shmtx_wr_pri_reads, 2thread, 2, false)
BENCH_REL(shmtx_w_bare_reads, 2thread, 2, false)
BENCH_REL(shmtx_rd_pri_reads, 2thread, 2, false)
BENCH_REL(shmtx_r_bare_reads, 2thread, 2, false)
BENCH_REL(folly_ticket_reads, 2thread, 2, false)
BENCH_REL(boost_shared_reads, 2thread, 2, false)
BENCH_REL(pthrd_rwlock_reads, 2thread, 2, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin_reads, 4thread, 4, false)
BENCH_REL(shmtx_wr_pri_reads, 4thread, 4, false)
BENCH_REL(shmtx_w_bare_reads, 4thread, 4, false)
BENCH_REL(shmtx_rd_pri_reads, 4thread, 4, false)
BENCH_REL(shmtx_r_bare_reads, 4thread, 4, false)
BENCH_REL(folly_ticket_reads, 4thread, 4, false)
BENCH_REL(boost_shared_reads, 4thread, 4, false)
BENCH_REL(pthrd_rwlock_reads, 4thread, 4, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin_reads, 8thread, 8, false)
BENCH_REL(shmtx_wr_pri_reads, 8thread, 8, false)
BENCH_REL(shmtx_w_bare_reads, 8thread, 8, false)
BENCH_REL(shmtx_rd_pri_reads, 8thread, 8, false)
BENCH_REL(shmtx_r_bare_reads, 8thread, 8, false)
BENCH_REL(folly_ticket_reads, 8thread, 8, false)
BENCH_REL(boost_shared_reads, 8thread, 8, false)
BENCH_REL(pthrd_rwlock_reads, 8thread, 8, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin_reads, 16thread, 16, false)
BENCH_REL(shmtx_wr_pri_reads, 16thread, 16, false)
BENCH_REL(shmtx_w_bare_reads, 16thread, 16, false)
BENCH_REL(shmtx_rd_pri_reads, 16thread, 16, false)
BENCH_REL(shmtx_r_bare_reads, 16thread, 16, false)
BENCH_REL(folly_ticket_reads, 16thread, 16, false)
BENCH_REL(boost_shared_reads, 16thread, 16, false)
BENCH_REL(pthrd_rwlock_reads, 16thread, 16, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin_reads, 32thread, 32, false)
BENCH_REL(shmtx_wr_pri_reads, 32thread, 32, false)
BENCH_REL(shmtx_w_bare_reads, 32thread, 32, false)
BENCH_REL(shmtx_rd_pri_reads, 32thread, 32, false)
BENCH_REL(shmtx_r_bare_reads, 32thread, 32, false)
BENCH_REL(folly_ticket_reads, 32thread, 32, false)
BENCH_REL(boost_shared_reads, 32thread, 32, false)
BENCH_REL(pthrd_rwlock_reads, 32thread, 32, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin_reads, 64thread, 64, false)
BENCH_REL(shmtx_wr_pri_reads, 64thread, 64, false)
BENCH_REL(shmtx_w_bare_reads, 64thread, 64, false)
BENCH_REL(shmtx_rd_pri_reads, 64thread, 64, false)
BENCH_REL(shmtx_r_bare_reads, 64thread, 64, false)
BENCH_REL(folly_ticket_reads, 64thread, 64, false)
BENCH_REL(boost_shared_reads, 64thread, 64, false)
BENCH_REL(pthrd_rwlock_reads, 64thread, 64, false)
// 1 lock used by everybody, 100% writes. Threads only hurt here, but it is
// good not to fail catastrophically. Compare to single_thread_lock_unlock
// to see the overhead of the generic driver (and its pseudo-random number
// generator). pthrd_mutex_ is a pthread_mutex_t (default, not adaptive),
// which is better than any of the reader-writer locks for this scenario.
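// Hedged sketch of the exclusive path measured in the all-write scenarios
// below; the helper name and parameters are illustrative only.
inline void exclusiveWriteSketch(SharedMutex& lock, int& guardedValue) {
  lock.lock(); // writers always go through the inline lock word
  ++guardedValue; // exclusive access to the guarded data
  lock.unlock();
}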
BENCHMARK_DRAW_LINE();
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 1thread_all_write, 1, 1.0, false)
BENCH_REL(shmtx_wr_pri, 1thread_all_write, 1, 1.0, false)
BENCH_REL(shmtx_rd_pri, 1thread_all_write, 1, 1.0, false)
BENCH_REL(folly_ticket, 1thread_all_write, 1, 1.0, false)
BENCH_REL(boost_shared, 1thread_all_write, 1, 1.0, false)
BENCH_REL(pthrd_rwlock, 1thread_all_write, 1, 1.0, false)
BENCH_REL(pthrd_mutex_, 1thread_all_write, 1, 1.0, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 2thread_all_write, 2, 1.0, false)
BENCH_REL(shmtx_wr_pri, 2thread_all_write, 2, 1.0, false)
BENCH_REL(shmtx_rd_pri, 2thread_all_write, 2, 1.0, false)
BENCH_REL(folly_ticket, 2thread_all_write, 2, 1.0, false)
BENCH_REL(boost_shared, 2thread_all_write, 2, 1.0, false)
BENCH_REL(pthrd_rwlock, 2thread_all_write, 2, 1.0, false)
BENCH_REL(pthrd_mutex_, 2thread_all_write, 2, 1.0, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 4thread_all_write, 4, 1.0, false)
BENCH_REL(shmtx_wr_pri, 4thread_all_write, 4, 1.0, false)
BENCH_REL(shmtx_rd_pri, 4thread_all_write, 4, 1.0, false)
BENCH_REL(folly_ticket, 4thread_all_write, 4, 1.0, false)
BENCH_REL(boost_shared, 4thread_all_write, 4, 1.0, false)
BENCH_REL(pthrd_rwlock, 4thread_all_write, 4, 1.0, false)
BENCH_REL(pthrd_mutex_, 4thread_all_write, 4, 1.0, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 8thread_all_write, 8, 1.0, false)
BENCH_REL(shmtx_wr_pri, 8thread_all_write, 8, 1.0, false)
BENCH_REL(shmtx_rd_pri, 8thread_all_write, 8, 1.0, false)
BENCH_REL(folly_ticket, 8thread_all_write, 8, 1.0, false)
BENCH_REL(boost_shared, 8thread_all_write, 8, 1.0, false)
BENCH_REL(pthrd_rwlock, 8thread_all_write, 8, 1.0, false)
BENCH_REL(pthrd_mutex_, 8thread_all_write, 8, 1.0, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 16thread_all_write, 16, 1.0, false)
BENCH_REL(shmtx_wr_pri, 16thread_all_write, 16, 1.0, false)
BENCH_REL(shmtx_rd_pri, 16thread_all_write, 16, 1.0, false)
BENCH_REL(folly_ticket, 16thread_all_write, 16, 1.0, false)
BENCH_REL(boost_shared, 16thread_all_write, 16, 1.0, false)
BENCH_REL(pthrd_rwlock, 16thread_all_write, 16, 1.0, false)
BENCH_REL(pthrd_mutex_, 16thread_all_write, 16, 1.0, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 32thread_all_write, 32, 1.0, false)
BENCH_REL(shmtx_wr_pri, 32thread_all_write, 32, 1.0, false)
BENCH_REL(shmtx_rd_pri, 32thread_all_write, 32, 1.0, false)
BENCH_REL(folly_ticket, 32thread_all_write, 32, 1.0, false)
BENCH_REL(boost_shared, 32thread_all_write, 32, 1.0, false)
BENCH_REL(pthrd_rwlock, 32thread_all_write, 32, 1.0, false)
BENCH_REL(pthrd_mutex_, 32thread_all_write, 32, 1.0, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 64thread_all_write, 64, 1.0, false)
BENCH_REL(shmtx_wr_pri, 64thread_all_write, 64, 1.0, false)
BENCH_REL(shmtx_rd_pri, 64thread_all_write, 64, 1.0, false)
BENCH_REL(folly_ticket, 64thread_all_write, 64, 1.0, false)
BENCH_REL(boost_shared, 64thread_all_write, 64, 1.0, false)
BENCH_REL(pthrd_rwlock, 64thread_all_write, 64, 1.0, false)
BENCH_REL(pthrd_mutex_, 64thread_all_write, 64, 1.0, false)
// 1 lock used by everybody, 10% writes. Not much scaling to be had. Perf
// is best at 1 thread; once multiple threads are involved, going past 8
// threads hurts.
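// A hedged sketch, assuming <random> is available, of the per-operation
// decision the generic driver makes in these mixed workloads: a PRNG picks
// the exclusive path with probability writeFraction and the shared path
// otherwise. The real driver in this file also shards threads and locks;
// the helper below is illustrative only.
inline void mixedOpSketch(
    SharedMutex& lock,
    double writeFraction,
    std::minstd_rand& prng,
    int& guardedValue) {
  std::uniform_real_distribution<double> uniform(0.0, 1.0);
  if (uniform(prng) < writeFraction) {
    lock.lock(); // roughly writeFraction of the operations write
    ++guardedValue;
    lock.unlock();
  } else {
    SharedMutex::Token token;
    lock.lock_shared(token); // the rest are shared reads
    folly::doNotOptimizeAway(guardedValue);
    lock.unlock_shared(token);
  }
}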
BENCHMARK_DRAW_LINE();
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 1thread_10pct_write, 1, 0.10, false)
BENCH_REL(shmtx_wr_pri, 1thread_10pct_write, 1, 0.10, false)
BENCH_REL(shmtx_rd_pri, 1thread_10pct_write, 1, 0.10, false)
BENCH_REL(folly_ticket, 1thread_10pct_write, 1, 0.10, false)
BENCH_REL(boost_shared, 1thread_10pct_write, 1, 0.10, false)
BENCH_REL(pthrd_rwlock, 1thread_10pct_write, 1, 0.10, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 2thread_10pct_write, 2, 0.10, false)
BENCH_REL(shmtx_wr_pri, 2thread_10pct_write, 2, 0.10, false)
BENCH_REL(shmtx_rd_pri, 2thread_10pct_write, 2, 0.10, false)
BENCH_REL(folly_ticket, 2thread_10pct_write, 2, 0.10, false)
BENCH_REL(boost_shared, 2thread_10pct_write, 2, 0.10, false)
BENCH_REL(pthrd_rwlock, 2thread_10pct_write, 2, 0.10, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 4thread_10pct_write, 4, 0.10, false)
BENCH_REL(shmtx_wr_pri, 4thread_10pct_write, 4, 0.10, false)
BENCH_REL(shmtx_rd_pri, 4thread_10pct_write, 4, 0.10, false)
BENCH_REL(folly_ticket, 4thread_10pct_write, 4, 0.10, false)
BENCH_REL(boost_shared, 4thread_10pct_write, 4, 0.10, false)
BENCH_REL(pthrd_rwlock, 4thread_10pct_write, 4, 0.10, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 8thread_10pct_write, 8, 0.10, false)
BENCH_REL(shmtx_wr_pri, 8thread_10pct_write, 8, 0.10, false)
BENCH_REL(shmtx_rd_pri, 8thread_10pct_write, 8, 0.10, false)
BENCH_REL(folly_ticket, 8thread_10pct_write, 8, 0.10, false)
BENCH_REL(boost_shared, 8thread_10pct_write, 8, 0.10, false)
BENCH_REL(pthrd_rwlock, 8thread_10pct_write, 8, 0.10, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 16thread_10pct_write, 16, 0.10, false)
BENCH_REL(shmtx_wr_pri, 16thread_10pct_write, 16, 0.10, false)
BENCH_REL(shmtx_rd_pri, 16thread_10pct_write, 16, 0.10, false)
BENCH_REL(folly_ticket, 16thread_10pct_write, 16, 0.10, false)
BENCH_REL(boost_shared, 16thread_10pct_write, 16, 0.10, false)
BENCH_REL(pthrd_rwlock, 16thread_10pct_write, 16, 0.10, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 32thread_10pct_write, 32, 0.10, false)
BENCH_REL(shmtx_wr_pri, 32thread_10pct_write, 32, 0.10, false)
BENCH_REL(shmtx_rd_pri, 32thread_10pct_write, 32, 0.10, false)
BENCH_REL(folly_ticket, 32thread_10pct_write, 32, 0.10, false)
BENCH_REL(boost_shared, 32thread_10pct_write, 32, 0.10, false)
BENCH_REL(pthrd_rwlock, 32thread_10pct_write, 32, 0.10, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 64thread_10pct_write, 64, 0.10, false)
BENCH_REL(shmtx_wr_pri, 64thread_10pct_write, 64, 0.10, false)
BENCH_REL(shmtx_rd_pri, 64thread_10pct_write, 64, 0.10, false)
BENCH_REL(folly_ticket, 64thread_10pct_write, 64, 0.10, false)
BENCH_REL(boost_shared, 64thread_10pct_write, 64, 0.10, false)
BENCH_REL(pthrd_rwlock, 64thread_10pct_write, 64, 0.10, false)
// 1 lock used by everybody, 1% writes. This is a more realistic example
// than the concurrent_*_reads benchmark, but it still shows the SharedMutex
// locks winning over all of the others.
BENCHMARK_DRAW_LINE();
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 1thread_1pct_write, 1, 0.01, false)
BENCH_REL(shmtx_wr_pri, 1thread_1pct_write, 1, 0.01, false)
BENCH_REL(shmtx_w_bare, 1thread_1pct_write, 1, 0.01, false)
BENCH_REL(shmtx_rd_pri, 1thread_1pct_write, 1, 0.01, false)
BENCH_REL(shmtx_r_bare, 1thread_1pct_write, 1, 0.01, false)
BENCH_REL(folly_ticket, 1thread_1pct_write, 1, 0.01, false)
BENCH_REL(boost_shared, 1thread_1pct_write, 1, 0.01, false)
BENCH_REL(pthrd_rwlock, 1thread_1pct_write, 1, 0.01, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 2thread_1pct_write, 2, 0.01, false)
BENCH_REL(shmtx_wr_pri, 2thread_1pct_write, 2, 0.01, false)
BENCH_REL(shmtx_w_bare, 2thread_1pct_write, 2, 0.01, false)
BENCH_REL(shmtx_rd_pri, 2thread_1pct_write, 2, 0.01, false)
BENCH_REL(shmtx_r_bare, 2thread_1pct_write, 2, 0.01, false)
BENCH_REL(folly_ticket, 2thread_1pct_write, 2, 0.01, false)
BENCH_REL(boost_shared, 2thread_1pct_write, 2, 0.01, false)
BENCH_REL(pthrd_rwlock, 2thread_1pct_write, 2, 0.01, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 4thread_1pct_write, 4, 0.01, false)
BENCH_REL(shmtx_wr_pri, 4thread_1pct_write, 4, 0.01, false)
BENCH_REL(shmtx_w_bare, 4thread_1pct_write, 4, 0.01, false)
BENCH_REL(shmtx_rd_pri, 4thread_1pct_write, 4, 0.01, false)
BENCH_REL(shmtx_r_bare, 4thread_1pct_write, 4, 0.01, false)
BENCH_REL(folly_ticket, 4thread_1pct_write, 4, 0.01, false)
BENCH_REL(boost_shared, 4thread_1pct_write, 4, 0.01, false)
BENCH_REL(pthrd_rwlock, 4thread_1pct_write, 4, 0.01, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 8thread_1pct_write, 8, 0.01, false)
BENCH_REL(shmtx_wr_pri, 8thread_1pct_write, 8, 0.01, false)
BENCH_REL(shmtx_w_bare, 8thread_1pct_write, 8, 0.01, false)
BENCH_REL(shmtx_rd_pri, 8thread_1pct_write, 8, 0.01, false)
BENCH_REL(shmtx_r_bare, 8thread_1pct_write, 8, 0.01, false)
BENCH_REL(folly_ticket, 8thread_1pct_write, 8, 0.01, false)
BENCH_REL(boost_shared, 8thread_1pct_write, 8, 0.01, false)
BENCH_REL(pthrd_rwlock, 8thread_1pct_write, 8, 0.01, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 16thread_1pct_write, 16, 0.01, false)
BENCH_REL(shmtx_wr_pri, 16thread_1pct_write, 16, 0.01, false)
BENCH_REL(shmtx_w_bare, 16thread_1pct_write, 16, 0.01, false)
BENCH_REL(shmtx_rd_pri, 16thread_1pct_write, 16, 0.01, false)
BENCH_REL(shmtx_r_bare, 16thread_1pct_write, 16, 0.01, false)
BENCH_REL(folly_ticket, 16thread_1pct_write, 16, 0.01, false)
BENCH_REL(boost_shared, 16thread_1pct_write, 16, 0.01, false)
BENCH_REL(pthrd_rwlock, 16thread_1pct_write, 16, 0.01, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 32thread_1pct_write, 32, 0.01, false)
BENCH_REL(shmtx_wr_pri, 32thread_1pct_write, 32, 0.01, false)
BENCH_REL(shmtx_w_bare, 32thread_1pct_write, 32, 0.01, false)
BENCH_REL(shmtx_rd_pri, 32thread_1pct_write, 32, 0.01, false)
BENCH_REL(shmtx_r_bare, 32thread_1pct_write, 32, 0.01, false)
BENCH_REL(folly_ticket, 32thread_1pct_write, 32, 0.01, false)
BENCH_REL(boost_shared, 32thread_1pct_write, 32, 0.01, false)
BENCH_REL(pthrd_rwlock, 32thread_1pct_write, 32, 0.01, false)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 64thread_1pct_write, 64, 0.01, false)
BENCH_REL(shmtx_wr_pri, 64thread_1pct_write, 64, 0.01, false)
BENCH_REL(shmtx_w_bare, 64thread_1pct_write, 64, 0.01, false)
BENCH_REL(shmtx_rd_pri, 64thread_1pct_write, 64, 0.01, false)
BENCH_REL(shmtx_r_bare, 64thread_1pct_write, 64, 0.01, false)
BENCH_REL(folly_ticket, 64thread_1pct_write, 64, 0.01, false)
BENCH_REL(boost_shared, 64thread_1pct_write, 64, 0.01, false)
BENCH_REL(pthrd_rwlock, 64thread_1pct_write, 64, 0.01, false)
// Worst-case scenario for deferred locks. There is no actual sharing, so
// read operations will likely have to set the kDeferredReadersPossibleBit
// first, and writers will likely have to scan deferredReaders[].
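// Conceptual sketch (not folly's actual internals) of the writer-side scan
// the comment above refers to: before acquiring exclusively, a writer must
// rule out every deferred-reader slot, which is pure overhead when the lock
// is never actually shared. The slot layout and names here are assumptions,
// and <atomic>/<cstdint> are assumed available via the existing includes.
inline bool anyDeferredReaderSketch(
    const std::atomic<std::uintptr_t>* slots,
    std::size_t numSlots,
    std::uintptr_t lockId) {
  for (std::size_t i = 0; i < numSlots; ++i) {
    if (slots[i].load(std::memory_order_acquire) == lockId) {
      return true; // some reader holds this lock through slot i
    }
  }
  return false;
}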
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 2thr_2lock_50pct_write, 2, 0.50, true)
BENCH_REL(shmtx_wr_pri, 2thr_2lock_50pct_write, 2, 0.50, true)
BENCH_REL(shmtx_rd_pri, 2thr_2lock_50pct_write, 2, 0.50, true)
BENCH_BASE(folly_rwspin, 4thr_4lock_50pct_write, 4, 0.50, true)
BENCH_REL(shmtx_wr_pri, 4thr_4lock_50pct_write, 4, 0.50, true)
BENCH_REL(shmtx_rd_pri, 4thr_4lock_50pct_write, 4, 0.50, true)
BENCH_BASE(folly_rwspin, 8thr_8lock_50pct_write, 8, 0.50, true)
BENCH_REL(shmtx_wr_pri, 8thr_8lock_50pct_write, 8, 0.50, true)
BENCH_REL(shmtx_rd_pri, 8thr_8lock_50pct_write, 8, 0.50, true)
BENCH_BASE(folly_rwspin, 16thr_16lock_50pct_write, 16, 0.50, true)
BENCH_REL(shmtx_wr_pri, 16thr_16lock_50pct_write, 16, 0.50, true)
BENCH_REL(shmtx_rd_pri, 16thr_16lock_50pct_write, 16, 0.50, true)
BENCH_BASE(folly_rwspin, 32thr_32lock_50pct_write, 32, 0.50, true)
BENCH_REL(shmtx_wr_pri, 32thr_32lock_50pct_write, 32, 0.50, true)
BENCH_REL(shmtx_rd_pri, 32thr_32lock_50pct_write, 32, 0.50, true)
BENCH_BASE(folly_rwspin, 64thr_64lock_50pct_write, 64, 0.50, true)
BENCH_REL(shmtx_wr_pri, 64thr_64lock_50pct_write, 64, 0.50, true)
BENCH_REL(shmtx_rd_pri, 64thr_64lock_50pct_write, 64, 0.50, true)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 2thr_2lock_10pct_write, 2, 0.10, true)
BENCH_REL(shmtx_wr_pri, 2thr_2lock_10pct_write, 2, 0.10, true)
BENCH_REL(shmtx_rd_pri, 2thr_2lock_10pct_write, 2, 0.10, true)
BENCH_BASE(folly_rwspin, 4thr_4lock_10pct_write, 4, 0.10, true)
BENCH_REL(shmtx_wr_pri, 4thr_4lock_10pct_write, 4, 0.10, true)
BENCH_REL(shmtx_rd_pri, 4thr_4lock_10pct_write, 4, 0.10, true)
BENCH_BASE(folly_rwspin, 8thr_8lock_10pct_write, 8, 0.10, true)
BENCH_REL(shmtx_wr_pri, 8thr_8lock_10pct_write, 8, 0.10, true)
BENCH_REL(shmtx_rd_pri, 8thr_8lock_10pct_write, 8, 0.10, true)
BENCH_BASE(folly_rwspin, 16thr_16lock_10pct_write, 16, 0.10, true)
BENCH_REL(shmtx_wr_pri, 16thr_16lock_10pct_write, 16, 0.10, true)
BENCH_REL(shmtx_rd_pri, 16thr_16lock_10pct_write, 16, 0.10, true)
BENCH_BASE(folly_rwspin, 32thr_32lock_10pct_write, 32, 0.10, true)
BENCH_REL(shmtx_wr_pri, 32thr_32lock_10pct_write, 32, 0.10, true)
BENCH_REL(shmtx_rd_pri, 32thr_32lock_10pct_write, 32, 0.10, true)
BENCH_BASE(folly_rwspin, 64thr_64lock_10pct_write, 64, 0.10, true)
BENCH_REL(shmtx_wr_pri, 64thr_64lock_10pct_write, 64, 0.10, true)
BENCH_REL(shmtx_rd_pri, 64thr_64lock_10pct_write, 64, 0.10, true)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin, 2thr_2lock_1pct_write, 2, 0.01, true)
BENCH_REL(shmtx_wr_pri, 2thr_2lock_1pct_write, 2, 0.01, true)
BENCH_REL(shmtx_rd_pri, 2thr_2lock_1pct_write, 2, 0.01, true)
BENCH_BASE(folly_rwspin, 4thr_4lock_1pct_write, 4, 0.01, true)
BENCH_REL(shmtx_wr_pri, 4thr_4lock_1pct_write, 4, 0.01, true)
BENCH_REL(shmtx_rd_pri, 4thr_4lock_1pct_write, 4, 0.01, true)
BENCH_BASE(folly_rwspin, 8thr_8lock_1pct_write, 8, 0.01, true)
BENCH_REL(shmtx_wr_pri, 8thr_8lock_1pct_write, 8, 0.01, true)
BENCH_REL(shmtx_rd_pri, 8thr_8lock_1pct_write, 8, 0.01, true)
BENCH_BASE(folly_rwspin, 16thr_16lock_1pct_write, 16, 0.01, true)
BENCH_REL(shmtx_wr_pri, 16thr_16lock_1pct_write, 16, 0.01, true)
BENCH_REL(shmtx_rd_pri, 16thr_16lock_1pct_write, 16, 0.01, true)
BENCH_BASE(folly_rwspin, 32thr_32lock_1pct_write, 32, 0.01, true)
BENCH_REL(shmtx_wr_pri, 32thr_32lock_1pct_write, 32, 0.01, true)
BENCH_REL(shmtx_rd_pri, 32thr_32lock_1pct_write, 32, 0.01, true)
BENCH_BASE(folly_rwspin, 64thr_64lock_1pct_write, 64, 0.01, true)
BENCH_REL(shmtx_wr_pri, 64thr_64lock_1pct_write, 64, 0.01, true)
BENCH_REL(shmtx_rd_pri, 64thr_64lock_1pct_write, 64, 0.01, true)
// Ping-pong tests have a scaled number of iterations, because their burn
// loop would make them too slow otherwise. Ping-pong with a burn count of
// 100k or 300k shows the advantage of soft-spin, reducing the cost of
// each wakeup by about 20 usec. (Take the benchmark-reported difference,
// ~400 nanos, multiply by the scale of 100, then divide by 2 because
// each round has two wakeups.)
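// Worked form of the arithmetic above, checked at compile time:
// ~400ns reported difference * scale 100 / 2 wakeups per round = ~20us.
static_assert(
    400 * 100 / 2 == 20000, "~400ns at scale 100 is ~20us per wakeup");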
BENCHMARK_DRAW_LINE();
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin_ping_pong, burn0, 1, 0)
BENCH_REL(shmtx_w_bare_ping_pong, burn0, 1, 0)
BENCH_REL(shmtx_r_bare_ping_pong, burn0, 1, 0)
BENCH_REL(folly_ticket_ping_pong, burn0, 1, 0)
BENCH_REL(boost_shared_ping_pong, burn0, 1, 0)
BENCH_REL(pthrd_rwlock_ping_pong, burn0, 1, 0)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin_ping_pong, burn100k, 100, 100000)
BENCH_REL(shmtx_w_bare_ping_pong, burn100k, 100, 100000)
BENCH_REL(shmtx_r_bare_ping_pong, burn100k, 100, 100000)
BENCH_REL(folly_ticket_ping_pong, burn100k, 100, 100000)
BENCH_REL(boost_shared_ping_pong, burn100k, 100, 100000)
BENCH_REL(pthrd_rwlock_ping_pong, burn100k, 100, 100000)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin_ping_pong, burn300k, 100, 300000)
BENCH_REL(shmtx_w_bare_ping_pong, burn300k, 100, 300000)
BENCH_REL(shmtx_r_bare_ping_pong, burn300k, 100, 300000)
BENCH_REL(folly_ticket_ping_pong, burn300k, 100, 300000)
BENCH_REL(boost_shared_ping_pong, burn300k, 100, 300000)
BENCH_REL(pthrd_rwlock_ping_pong, burn300k, 100, 300000)
BENCHMARK_DRAW_LINE();
BENCH_BASE(folly_rwspin_ping_pong, burn1M, 1000, 1000000)
BENCH_REL(shmtx_w_bare_ping_pong, burn1M, 1000, 1000000)
BENCH_REL(shmtx_r_bare_ping_pong, burn1M, 1000, 1000000)
BENCH_REL(folly_ticket_ping_pong, burn1M, 1000, 1000000)
BENCH_REL(boost_shared_ping_pong, burn1M, 1000, 1000000)
BENCH_REL(pthrd_rwlock_ping_pong, burn1M, 1000, 1000000)
// Reproduce with a roughly 10-minute run of
//   sudo nice -n -20 shared_mutex_test --benchmark --bm_min_iters=1000000
//
// Comparisons use folly::RWSpinLock as the baseline, with the
// following row being the default SharedMutex (using *Holder or
// Token-ful methods).
//
// The following results are from a 2-socket Intel(R) Xeon(R) CPU
// E5-2660 0 @ 2.20GHz.
//
// ============================================================================
// folly/test/SharedMutexTest.cpp relative time/iter iters/s
// ============================================================================
// single_thread_lock_shared_unlock_shared 25.17ns 39.74M
// single_thread_lock_unlock 25.88ns 38.64M
// ----------------------------------------------------------------------------
// ----------------------------------------------------------------------------
// folly_rwspin_reads(1thread) 15.16ns 65.95M
// shmtx_wr_pri_reads(1thread) 69.18% 21.92ns 45.63M
// shmtx_w_bare_reads(1thread) 56.07% 27.04ns 36.98M
// shmtx_rd_pri_reads(1thread) 69.06% 21.95ns 45.55M
// shmtx_r_bare_reads(1thread) 56.36% 26.90ns 37.17M
// folly_ticket_reads(1thread) 57.56% 26.34ns 37.96M
// boost_shared_reads(1thread) 10.55% 143.72ns 6.96M
// pthrd_rwlock_reads(1thread) 39.61% 38.28ns 26.12M
// ----------------------------------------------------------------------------
// folly_rwspin_reads(2thread) 45.05ns 22.20M
// shmtx_wr_pri_reads(2thread) 379.98% 11.86ns 84.34M
// shmtx_w_bare_reads(2thread) 319.27% 14.11ns 70.87M
// shmtx_rd_pri_reads(2thread) 385.59% 11.68ns 85.59M
// shmtx_r_bare_reads(2thread) 306.56% 14.70ns 68.04M
// folly_ticket_reads(2thread) 61.07% 73.78ns 13.55M
// boost_shared_reads(2thread) 13.54% 332.66ns 3.01M
// pthrd_rwlock_reads(2thread) 34.22% 131.65ns 7.60M
// ----------------------------------------------------------------------------
// folly_rwspin_reads(4thread) 62.19ns 16.08M
// shmtx_wr_pri_reads(4thread) 1022.82% 6.08ns 164.48M
// shmtx_w_bare_reads(4thread) 875.37% 7.10ns 140.76M
// shmtx_rd_pri_reads(4thread) 1060.46% 5.86ns 170.53M
// shmtx_r_bare_reads(4thread) 879.88% 7.07ns 141.49M
// folly_ticket_reads(4thread) 64.62% 96.23ns 10.39M
// boost_shared_reads(4thread) 14.86% 418.49ns 2.39M
// pthrd_rwlock_reads(4thread) 25.01% 248.65ns 4.02M
// ----------------------------------------------------------------------------
// folly_rwspin_reads(8thread) 64.09ns 15.60M
// shmtx_wr_pri_reads(8thread) 2191.99% 2.92ns 342.03M
// shmtx_w_bare_reads(8thread) 1804.92% 3.55ns 281.63M
// shmtx_rd_pri_reads(8thread) 2194.60% 2.92ns 342.44M
// shmtx_r_bare_reads(8thread) 1800.53% 3.56ns 280.95M
// folly_ticket_reads(8thread) 54.90% 116.74ns 8.57M
// boost_shared_reads(8thread) 18.25% 351.24ns 2.85M
// pthrd_rwlock_reads(8thread) 28.19% 227.31ns 4.40M
// ----------------------------------------------------------------------------
// folly_rwspin_reads(16thread) 70.06ns 14.27M
// shmtx_wr_pri_reads(16thread) 4970.09% 1.41ns 709.38M
// shmtx_w_bare_reads(16thread) 4143.75% 1.69ns 591.44M
// shmtx_rd_pri_reads(16thread) 5009.31% 1.40ns 714.98M
// shmtx_r_bare_reads(16thread) 4067.36% 1.72ns 580.54M
// folly_ticket_reads(16thread) 46.78% 149.77ns 6.68M
// boost_shared_reads(16thread) 21.67% 323.37ns 3.09M
// pthrd_rwlock_reads(16thread) 35.05% 199.90ns 5.00M
// ----------------------------------------------------------------------------
// folly_rwspin_reads(32thread) 58.83ns 17.00M
// shmtx_wr_pri_reads(32thread) 5158.37% 1.14ns 876.79M
// shmtx_w_bare_reads(32thread) 4246.03% 1.39ns 721.72M
// shmtx_rd_pri_reads(32thread) 4845.97% 1.21ns 823.69M
// shmtx_r_bare_reads(32thread) 4721.44% 1.25ns 802.52M
// folly_ticket_reads(32thread) 28.40% 207.15ns 4.83M
// boost_shared_reads(32thread) 17.08% 344.54ns 2.90M
// pthrd_rwlock_reads(32thread) 30.01% 196.02ns 5.10M
// ----------------------------------------------------------------------------
// folly_rwspin_reads(64thread) 59.19ns 16.89M
// shmtx_wr_pri_reads(64thread) 3804.54% 1.56ns 642.76M
// shmtx_w_bare_reads(64thread) 3625.06% 1.63ns 612.43M
// shmtx_rd_pri_reads(64thread) 3418.19% 1.73ns 577.48M
// shmtx_r_bare_reads(64thread) 3416.98% 1.73ns 577.28M
// folly_ticket_reads(64thread) 30.53% 193.90ns 5.16M
// boost_shared_reads(64thread) 18.59% 318.47ns 3.14M
// pthrd_rwlock_reads(64thread) 31.35% 188.81ns 5.30M
// ----------------------------------------------------------------------------
// ----------------------------------------------------------------------------
// folly_rwspin(1thread_all_write) 23.77ns 42.06M
// shmtx_wr_pri(1thread_all_write) 85.09% 27.94ns 35.79M
// shmtx_rd_pri(1thread_all_write) 85.32% 27.87ns 35.89M
// folly_ticket(1thread_all_write) 88.11% 26.98ns 37.06M
// boost_shared(1thread_all_write) 16.49% 144.14ns 6.94M
// pthrd_rwlock(1thread_all_write) 53.99% 44.04ns 22.71M
// pthrd_mutex_(1thread_all_write) 86.05% 27.63ns 36.20M
// ----------------------------------------------------------------------------
// folly_rwspin(2thread_all_write) 76.05ns 13.15M
// shmtx_wr_pri(2thread_all_write) 60.67% 125.35ns 7.98M
// shmtx_rd_pri(2thread_all_write) 60.36% 125.99ns 7.94M
// folly_ticket(2thread_all_write) 129.10% 58.91ns 16.98M
// boost_shared(2thread_all_write) 18.65% 407.74ns 2.45M
// pthrd_rwlock(2thread_all_write) 40.90% 185.92ns 5.38M
// pthrd_mutex_(2thread_all_write) 127.37% 59.71ns 16.75M
// ----------------------------------------------------------------------------
// folly_rwspin(4thread_all_write) 207.17ns 4.83M
// shmtx_wr_pri(4thread_all_write) 119.42% 173.49ns 5.76M
// shmtx_rd_pri(4thread_all_write) 117.68% 176.05ns 5.68M
// folly_ticket(4thread_all_write) 182.39% 113.59ns 8.80M
// boost_shared(4thread_all_write) 11.98% 1.73us 578.46K
// pthrd_rwlock(4thread_all_write) 27.50% 753.25ns 1.33M
// pthrd_mutex_(4thread_all_write) 117.75% 175.95ns 5.68M
// ----------------------------------------------------------------------------
// folly_rwspin(8thread_all_write) 326.50ns 3.06M
// shmtx_wr_pri(8thread_all_write) 125.47% 260.22ns 3.84M
// shmtx_rd_pri(8thread_all_write) 124.73% 261.76ns 3.82M
// folly_ticket(8thread_all_write) 253.39% 128.85ns 7.76M
// boost_shared(8thread_all_write) 6.36% 5.13us 194.87K
// pthrd_rwlock(8thread_all_write) 38.54% 847.09ns 1.18M
// pthrd_mutex_(8thread_all_write) 166.31% 196.32ns 5.09M
// ----------------------------------------------------------------------------
// folly_rwspin(16thread_all_write) 729.89ns 1.37M
// shmtx_wr_pri(16thread_all_write) 219.91% 331.91ns 3.01M
// shmtx_rd_pri(16thread_all_write) 220.09% 331.62ns 3.02M
// folly_ticket(16thread_all_write) 390.06% 187.12ns 5.34M
// boost_shared(16thread_all_write) 10.27% 7.11us 140.72K
// pthrd_rwlock(16thread_all_write) 113.90% 640.84ns 1.56M
// pthrd_mutex_(16thread_all_write) 401.97% 181.58ns 5.51M
// ----------------------------------------------------------------------------
// folly_rwspin(32thread_all_write) 1.55us 645.01K
// shmtx_wr_pri(32thread_all_write) 415.05% 373.54ns 2.68M
// shmtx_rd_pri(32thread_all_write) 258.45% 599.88ns 1.67M
// folly_ticket(32thread_all_write) 525.40% 295.09ns 3.39M
// boost_shared(32thread_all_write) 20.84% 7.44us 134.45K
// pthrd_rwlock(32thread_all_write) 254.16% 610.00ns 1.64M
// pthrd_mutex_(32thread_all_write) 852.51% 181.86ns 5.50M
// ----------------------------------------------------------------------------
// folly_rwspin(64thread_all_write) 2.03us 492.00K
// shmtx_wr_pri(64thread_all_write) 517.65% 392.64ns 2.55M
// shmtx_rd_pri(64thread_all_write) 288.20% 705.24ns 1.42M
// folly_ticket(64thread_all_write) 638.22% 318.47ns 3.14M
// boost_shared(64thread_all_write) 27.56% 7.37us 135.61K
// pthrd_rwlock(64thread_all_write) 326.75% 622.04ns 1.61M
// pthrd_mutex_(64thread_all_write) 1231.57% 165.04ns 6.06M
// ----------------------------------------------------------------------------
// ----------------------------------------------------------------------------
// folly_rwspin(1thread_10pct_write) 19.39ns 51.58M
// shmtx_wr_pri(1thread_10pct_write) 93.87% 20.65ns 48.42M
// shmtx_rd_pri(1thread_10pct_write) 93.60% 20.71ns 48.28M
// folly_ticket(1thread_10pct_write) 73.75% 26.29ns 38.04M
// boost_shared(1thread_10pct_write) 12.97% 149.53ns 6.69M
// pthrd_rwlock(1thread_10pct_write) 44.15% 43.92ns 22.77M
// ----------------------------------------------------------------------------
// folly_rwspin(2thread_10pct_write) 227.88ns 4.39M
// shmtx_wr_pri(2thread_10pct_write) 321.08% 70.98ns 14.09M
// shmtx_rd_pri(2thread_10pct_write) 280.65% 81.20ns 12.32M
// folly_ticket(2thread_10pct_write) 220.43% 103.38ns 9.67M
// boost_shared(2thread_10pct_write) 58.78% 387.71ns 2.58M
// pthrd_rwlock(2thread_10pct_write) 112.68% 202.23ns 4.94M
// ----------------------------------------------------------------------------
// folly_rwspin(4thread_10pct_write) 444.94ns 2.25M
// shmtx_wr_pri(4thread_10pct_write) 470.35% 94.60ns 10.57M
// shmtx_rd_pri(4thread_10pct_write) 349.08% 127.46ns 7.85M
// folly_ticket(4thread_10pct_write) 305.64% 145.58ns 6.87M
// boost_shared(4thread_10pct_write) 44.43% 1.00us 998.57K
// pthrd_rwlock(4thread_10pct_write) 100.59% 442.31ns 2.26M
// ----------------------------------------------------------------------------
// folly_rwspin(8thread_10pct_write) 424.67ns 2.35M
// shmtx_wr_pri(8thread_10pct_write) 337.53% 125.82ns 7.95M
// shmtx_rd_pri(8thread_10pct_write) 232.32% 182.79ns 5.47M
// folly_ticket(8thread_10pct_write) 206.59% 205.56ns 4.86M
// boost_shared(8thread_10pct_write) 19.45% 2.18us 457.90K
// pthrd_rwlock(8thread_10pct_write) 78.58% 540.42ns 1.85M
// ----------------------------------------------------------------------------
// folly_rwspin(16thread_10pct_write) 727.04ns 1.38M
// shmtx_wr_pri(16thread_10pct_write) 400.60% 181.49ns 5.51M
// shmtx_rd_pri(16thread_10pct_write) 312.94% 232.33ns 4.30M
// folly_ticket(16thread_10pct_write) 283.67% 256.30ns 3.90M
// boost_shared(16thread_10pct_write) 15.87% 4.58us 218.32K
// pthrd_rwlock(16thread_10pct_write) 131.28% 553.82ns 1.81M
// ----------------------------------------------------------------------------
// folly_rwspin(32thread_10pct_write) 810.61ns 1.23M
// shmtx_wr_pri(32thread_10pct_write) 429.61% 188.68ns 5.30M
// shmtx_rd_pri(32thread_10pct_write) 321.13% 252.42ns 3.96M
// folly_ticket(32thread_10pct_write) 247.65% 327.32ns 3.06M
// boost_shared(32thread_10pct_write) 8.34% 9.71us 102.94K
// pthrd_rwlock(32thread_10pct_write) 144.28% 561.85ns 1.78M
// ----------------------------------------------------------------------------
// folly_rwspin(64thread_10pct_write) 1.10us 912.30K
// shmtx_wr_pri(64thread_10pct_write) 486.68% 225.22ns 4.44M
// shmtx_rd_pri(64thread_10pct_write) 412.96% 265.43ns 3.77M
// folly_ticket(64thread_10pct_write) 280.23% 391.15ns 2.56M
// boost_shared(64thread_10pct_write) 6.16% 17.79us 56.22K
// pthrd_rwlock(64thread_10pct_write) 198.81% 551.34ns 1.81M
// ----------------------------------------------------------------------------
// ----------------------------------------------------------------------------
// folly_rwspin(1thread_1pct_write) 19.02ns 52.57M
// shmtx_wr_pri(1thread_1pct_write) 94.46% 20.14ns 49.66M
// shmtx_w_bare(1thread_1pct_write) 76.60% 24.83ns 40.27M
// shmtx_rd_pri(1thread_1pct_write) 93.83% 20.27ns 49.33M
// shmtx_r_bare(1thread_1pct_write) 77.04% 24.69ns 40.50M
// folly_ticket(1thread_1pct_write) 72.83% 26.12ns 38.29M
// boost_shared(1thread_1pct_write) 12.48% 152.44ns 6.56M
// pthrd_rwlock(1thread_1pct_write) 42.85% 44.39ns 22.53M
// ----------------------------------------------------------------------------
// folly_rwspin(2thread_1pct_write) 110.63ns 9.04M
// shmtx_wr_pri(2thread_1pct_write) 442.12% 25.02ns 39.96M
// shmtx_w_bare(2thread_1pct_write) 374.65% 29.53ns 33.86M
// shmtx_rd_pri(2thread_1pct_write) 371.08% 29.81ns 33.54M
// shmtx_r_bare(2thread_1pct_write) 138.02% 80.15ns 12.48M
// folly_ticket(2thread_1pct_write) 131.34% 84.23ns 11.87M
// boost_shared(2thread_1pct_write) 30.35% 364.58ns 2.74M
// pthrd_rwlock(2thread_1pct_write) 95.48% 115.87ns 8.63M
// ----------------------------------------------------------------------------
// folly_rwspin(4thread_1pct_write) 140.62ns 7.11M
// shmtx_wr_pri(4thread_1pct_write) 627.13% 22.42ns 44.60M
// shmtx_w_bare(4thread_1pct_write) 552.94% 25.43ns 39.32M
// shmtx_rd_pri(4thread_1pct_write) 226.06% 62.21ns 16.08M
// shmtx_r_bare(4thread_1pct_write) 77.61% 181.19ns 5.52M
// folly_ticket(4thread_1pct_write) 119.58% 117.60ns 8.50M
// boost_shared(4thread_1pct_write) 25.36% 554.54ns 1.80M
// pthrd_rwlock(4thread_1pct_write) 45.55% 308.72ns 3.24M
// ----------------------------------------------------------------------------
// folly_rwspin(8thread_1pct_write) 166.23ns 6.02M
// shmtx_wr_pri(8thread_1pct_write) 687.09% 24.19ns 41.33M
// shmtx_w_bare(8thread_1pct_write) 611.80% 27.17ns 36.80M
// shmtx_rd_pri(8thread_1pct_write) 140.37% 118.43ns 8.44M
// shmtx_r_bare(8thread_1pct_write) 80.32% 206.97ns 4.83M
// folly_ticket(8thread_1pct_write) 117.06% 142.01ns 7.04M
// boost_shared(8thread_1pct_write) 22.29% 745.67ns 1.34M
// pthrd_rwlock(8thread_1pct_write) 49.84% 333.55ns 3.00M
// ----------------------------------------------------------------------------
// folly_rwspin(16thread_1pct_write) 419.79ns 2.38M
// shmtx_wr_pri(16thread_1pct_write) 1397.92% 30.03ns 33.30M
// shmtx_w_bare(16thread_1pct_write) 1324.60% 31.69ns 31.55M
// shmtx_rd_pri(16thread_1pct_write) 278.12% 150.94ns 6.63M
// shmtx_r_bare(16thread_1pct_write) 194.25% 216.11ns 4.63M
// folly_ticket(16thread_1pct_write) 255.38% 164.38ns 6.08M
// boost_shared(16thread_1pct_write) 33.71% 1.25us 803.01K
// pthrd_rwlock(16thread_1pct_write) 131.96% 318.12ns 3.14M
// ----------------------------------------------------------------------------
// folly_rwspin(32thread_1pct_write) 395.99ns 2.53M
// shmtx_wr_pri(32thread_1pct_write) 1332.76% 29.71ns 33.66M
// shmtx_w_bare(32thread_1pct_write) 1208.86% 32.76ns 30.53M
// shmtx_rd_pri(32thread_1pct_write) 252.97% 156.54ns 6.39M
// shmtx_r_bare(32thread_1pct_write) 193.79% 204.35ns 4.89M
// folly_ticket(32thread_1pct_write) 173.16% 228.69ns 4.37M
// boost_shared(32thread_1pct_write) 17.00% 2.33us 429.40K
// pthrd_rwlock(32thread_1pct_write) 129.88% 304.89ns 3.28M
// ----------------------------------------------------------------------------
// folly_rwspin(64thread_1pct_write) 424.07ns 2.36M
// shmtx_wr_pri(64thread_1pct_write) 1297.89% 32.67ns 30.61M
// shmtx_w_bare(64thread_1pct_write) 1228.88% 34.51ns 28.98M
// shmtx_rd_pri(64thread_1pct_write) 270.40% 156.83ns 6.38M
// shmtx_r_bare(64thread_1pct_write) 218.05% 194.48ns 5.14M
// folly_ticket(64thread_1pct_write) 171.44% 247.36ns 4.04M
// boost_shared(64thread_1pct_write) 10.60% 4.00us 249.95K
// pthrd_rwlock(64thread_1pct_write) 143.80% 294.91ns 3.39M
// ----------------------------------------------------------------------------
// folly_rwspin(2thr_2lock_50pct_write) 10.87ns 91.99M
// shmtx_wr_pri(2thr_2lock_50pct_write) 83.71% 12.99ns 77.01M
// shmtx_rd_pri(2thr_2lock_50pct_write) 84.08% 12.93ns 77.34M
// folly_rwspin(4thr_4lock_50pct_write) 5.32ns 188.12M
// shmtx_wr_pri(4thr_4lock_50pct_write) 82.21% 6.47ns 154.65M
// shmtx_rd_pri(4thr_4lock_50pct_write) 81.20% 6.55ns 152.75M
// folly_rwspin(8thr_8lock_50pct_write) 2.64ns 379.06M
// shmtx_wr_pri(8thr_8lock_50pct_write) 81.26% 3.25ns 308.03M
// shmtx_rd_pri(8thr_8lock_50pct_write) 80.95% 3.26ns 306.86M
// folly_rwspin(16thr_16lock_50pct_write) 1.52ns 656.77M
// shmtx_wr_pri(16thr_16lock_50pct_write) 86.24% 1.77ns 566.41M
// shmtx_rd_pri(16thr_16lock_50pct_write) 83.72% 1.82ns 549.82M
// folly_rwspin(32thr_32lock_50pct_write) 1.19ns 841.03M
// shmtx_wr_pri(32thr_32lock_50pct_write) 85.08% 1.40ns 715.55M
// shmtx_rd_pri(32thr_32lock_50pct_write) 86.44% 1.38ns 727.00M
// folly_rwspin(64thr_64lock_50pct_write) 1.46ns 684.28M
// shmtx_wr_pri(64thr_64lock_50pct_write) 84.53% 1.73ns 578.43M
// shmtx_rd_pri(64thr_64lock_50pct_write) 82.80% 1.76ns 566.58M
// ----------------------------------------------------------------------------
// folly_rwspin(2thr_2lock_10pct_write) 10.01ns 99.85M
// shmtx_wr_pri(2thr_2lock_10pct_write) 92.02% 10.88ns 91.88M
// shmtx_rd_pri(2thr_2lock_10pct_write) 92.35% 10.84ns 92.22M
// folly_rwspin(4thr_4lock_10pct_write) 4.81ns 207.87M
// shmtx_wr_pri(4thr_4lock_10pct_write) 89.32% 5.39ns 185.67M
// shmtx_rd_pri(4thr_4lock_10pct_write) 88.96% 5.41ns 184.93M
// folly_rwspin(8thr_8lock_10pct_write) 2.39ns 417.62M
// shmtx_wr_pri(8thr_8lock_10pct_write) 91.17% 2.63ns 380.76M
// shmtx_rd_pri(8thr_8lock_10pct_write) 89.53% 2.67ns 373.92M
// folly_rwspin(16thr_16lock_10pct_write) 1.16ns 860.47M
// shmtx_wr_pri(16thr_16lock_10pct_write) 74.35% 1.56ns 639.77M
// shmtx_rd_pri(16thr_16lock_10pct_write) 91.34% 1.27ns 785.97M
// folly_rwspin(32thr_32lock_10pct_write) 1.15ns 866.23M
// shmtx_wr_pri(32thr_32lock_10pct_write) 92.32% 1.25ns 799.72M
// shmtx_rd_pri(32thr_32lock_10pct_write) 94.40% 1.22ns 817.71M
// folly_rwspin(64thr_64lock_10pct_write) 1.41ns 710.54M
// shmtx_wr_pri(64thr_64lock_10pct_write) 94.14% 1.50ns 668.88M
// shmtx_rd_pri(64thr_64lock_10pct_write) 94.80% 1.48ns 673.56M
// ----------------------------------------------------------------------------
// folly_rwspin(2thr_2lock_1pct_write) 9.58ns 104.36M
// shmtx_wr_pri(2thr_2lock_1pct_write) 92.00% 10.42ns 96.01M
// shmtx_rd_pri(2thr_2lock_1pct_write) 91.79% 10.44ns 95.79M
// folly_rwspin(4thr_4lock_1pct_write) 4.71ns 212.30M
// shmtx_wr_pri(4thr_4lock_1pct_write) 90.37% 5.21ns 191.85M
// shmtx_rd_pri(4thr_4lock_1pct_write) 89.94% 5.24ns 190.95M
// folly_rwspin(8thr_8lock_1pct_write) 2.33ns 429.91M
// shmtx_wr_pri(8thr_8lock_1pct_write) 90.67% 2.57ns 389.80M
// shmtx_rd_pri(8thr_8lock_1pct_write) 90.61% 2.57ns 389.55M
// folly_rwspin(16thr_16lock_1pct_write) 1.10ns 905.23M
// shmtx_wr_pri(16thr_16lock_1pct_write) 91.96% 1.20ns 832.46M
// shmtx_rd_pri(16thr_16lock_1pct_write) 92.29% 1.20ns 835.42M
// folly_rwspin(32thr_32lock_1pct_write) 1.14ns 879.85M
// shmtx_wr_pri(32thr_32lock_1pct_write) 93.41% 1.22ns 821.86M
// shmtx_rd_pri(32thr_32lock_1pct_write) 94.18% 1.21ns 828.66M
// folly_rwspin(64thr_64lock_1pct_write) 1.34ns 748.83M
// shmtx_wr_pri(64thr_64lock_1pct_write) 94.39% 1.41ns 706.84M
// shmtx_rd_pri(64thr_64lock_1pct_write) 94.02% 1.42ns 704.06M
// ----------------------------------------------------------------------------
// ----------------------------------------------------------------------------
// folly_rwspin_ping_pong(burn0) 605.63ns 1.65M
// shmtx_w_bare_ping_pong(burn0) 102.17% 592.76ns 1.69M
// shmtx_r_bare_ping_pong(burn0) 88.75% 682.44ns 1.47M
// folly_ticket_ping_pong(burn0) 63.92% 947.56ns 1.06M
// boost_shared_ping_pong(burn0) 8.52% 7.11us 140.73K
// pthrd_rwlock_ping_pong(burn0) 7.88% 7.68us 130.15K
// ----------------------------------------------------------------------------
// folly_rwspin_ping_pong(burn100k) 727.76ns 1.37M
// shmtx_w_bare_ping_pong(burn100k) 100.79% 722.09ns 1.38M
// shmtx_r_bare_ping_pong(burn100k) 101.98% 713.61ns 1.40M
// folly_ticket_ping_pong(burn100k) 102.80% 707.95ns 1.41M
// boost_shared_ping_pong(burn100k) 81.49% 893.02ns 1.12M
// pthrd_rwlock_ping_pong(burn100k) 71.05% 1.02us 976.30K
// ----------------------------------------------------------------------------
// folly_rwspin_ping_pong(burn300k) 2.11us 473.46K
// shmtx_w_bare_ping_pong(burn300k) 100.06% 2.11us 473.72K
// shmtx_r_bare_ping_pong(burn300k) 98.93% 2.13us 468.39K
// folly_ticket_ping_pong(burn300k) 96.68% 2.18us 457.73K
// boost_shared_ping_pong(burn300k) 84.72% 2.49us 401.13K
// pthrd_rwlock_ping_pong(burn300k) 84.62% 2.50us 400.66K
// ----------------------------------------------------------------------------
// folly_rwspin_ping_pong(burn1M) 709.70ns 1.41M
// shmtx_w_bare_ping_pong(burn1M) 100.28% 707.73ns 1.41M
// shmtx_r_bare_ping_pong(burn1M) 99.63% 712.37ns 1.40M
// folly_ticket_ping_pong(burn1M) 100.09% 709.05ns 1.41M
// boost_shared_ping_pong(burn1M) 94.09% 754.29ns 1.33M
// pthrd_rwlock_ping_pong(burn1M) 96.32% 736.82ns 1.36M
// ============================================================================
int main(int argc, char** argv) {
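  // The (void) casts below reference each benchmark driver so that builds
  // which never invoke the benchmarks do not flag them as unused.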
  (void)folly_rwspin_reads;
  (void)shmtx_wr_pri_reads;
  (void)shmtx_w_bare_reads;
  (void)shmtx_rd_pri_reads;
  (void)shmtx_r_bare_reads;
  (void)folly_ticket_reads;
  (void)boost_shared_reads;
  (void)pthrd_rwlock_reads;
  (void)folly_rwspin;
  (void)shmtx_wr_pri;
  (void)shmtx_w_bare;
  (void)shmtx_rd_pri;
  (void)shmtx_r_bare;
  (void)folly_ticket;
  (void)boost_shared;
  (void)pthrd_rwlock;
  (void)pthrd_mutex_;
  (void)folly_rwspin_ping_pong;
  (void)shmtx_w_bare_ping_pong;
  (void)shmtx_r_bare_ping_pong;
  (void)folly_ticket_ping_pong;
  (void)boost_shared_ping_pong;
  (void)pthrd_rwlock_ping_pong;

  testing::InitGoogleTest(&argc, argv);
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  int rv = RUN_ALL_TESTS();
  folly::runBenchmarksOnFlag();
  return rv;
}