/*
 * Copyright 2013-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <folly/ScopeGuard.h>
#include <folly/io/IOBuf.h>

#include <stdexcept>
#include <string>

namespace folly {

/**
 * An IOBufQueue encapsulates a chain of IOBufs and provides
 * convenience functions to append data to the back of the chain
 * and remove data from the front.
 *
 * You may also prepend data into the headroom of the first buffer in the
 * chain, if any.
 */
class IOBufQueue {
 private:
  /**
   * This guard should be taken by any method that intends to make any changes
   * to data_ (e.g. appending to it).
   *
   * It flushes the writable tail cache and refills it on destruction.
   */
  auto updateGuard() {
    flushCache();
    return folly::makeGuard([this] { updateWritableTailCache(); });
  }

  struct WritableRangeCacheData {
    std::pair<uint8_t*, uint8_t*> cachedRange;
    bool attached{false};

    WritableRangeCacheData() = default;

    WritableRangeCacheData(WritableRangeCacheData&& other)
        : cachedRange(other.cachedRange), attached(other.attached) {
      other.cachedRange = {};
      other.attached = false;
    }
    WritableRangeCacheData& operator=(WritableRangeCacheData&& other) {
      cachedRange = other.cachedRange;
      attached = other.attached;

      other.cachedRange = {};
      other.attached = false;

      return *this;
    }

    WritableRangeCacheData(const WritableRangeCacheData&) = delete;
    WritableRangeCacheData& operator=(const WritableRangeCacheData&) = delete;
  };

 public:
  struct Options {
    Options() : cacheChainLength(false) {}
    bool cacheChainLength;
  };

  /**
   * Commonly used Options, currently the only possible value other than
   * the default.
   */
  static Options cacheChainLength() {
    Options options;
    options.cacheChainLength = true;
    return options;
  }
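
  /**
   * Illustrative usage sketch: passing the returned Options to the
   * constructor so that chainLength() can be queried cheaply later.
   *
   *   folly::IOBufQueue queue(folly::IOBufQueue::cacheChainLength());
   *   queue.append("hello", 5);
   *   size_t len = queue.chainLength(); // 5; throws without the option
   */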

  /**
   * WritableRangeCache represents a cache of the current writable tail and
   * provides a cheap and simple interface to append to it that avoids paying
   * the cost of a preallocate/postallocate pair (i.e. indirections and
   * checks).
   *
   * The cache is flushed on destruction/copy/move and on non-const accesses to
   * the underlying IOBufQueue.
   *
   * Note: there can be only one active cache for a given IOBufQueue, i.e. when
   *       you fill a cache object it automatically invalidates the other
   *       cache (if any).
   */
  class WritableRangeCache {
   public:
    explicit WritableRangeCache(folly::IOBufQueue* q = nullptr) : queue_(q) {
      if (queue_) {
        fillCache();
      }
    }

    /**
     * Move constructor/assignment can move the cached range, but must update
     * the reference in IOBufQueue.
     */
    WritableRangeCache(WritableRangeCache&& other)
        : data_(std::move(other.data_)), queue_(other.queue_) {
      if (data_.attached) {
        queue_->updateCacheRef(data_);
      }
    }
    WritableRangeCache& operator=(WritableRangeCache&& other) {
      if (data_.attached) {
        queue_->clearWritableRangeCache();
      }

      data_ = std::move(other.data_);
      queue_ = other.queue_;

      if (data_.attached) {
        queue_->updateCacheRef(data_);
      }

      return *this;
    }

    /**
     * Copy constructor/assignment cannot copy the cached range.
     */
    WritableRangeCache(const WritableRangeCache& other)
        : queue_(other.queue_) {}
    WritableRangeCache& operator=(const WritableRangeCache& other) {
      if (data_.attached) {
        queue_->clearWritableRangeCache();
      }

      queue_ = other.queue_;

      return *this;
    }

    ~WritableRangeCache() {
      if (data_.attached) {
        queue_->clearWritableRangeCache();
      }
    }

    /**
     * Reset the underlying IOBufQueue; flushes the current cache if present.
     */
    void reset(IOBufQueue* q) {
      if (data_.attached) {
        queue_->clearWritableRangeCache();
      }

      queue_ = q;

      if (queue_) {
        fillCache();
      }
    }

    /**
     * Get a pointer to the underlying IOBufQueue object.
     */
    IOBufQueue* queue() {
      return queue_;
    }

    /**
     * Return a pointer to the start of the cached writable tail.
     *
     * Note: doesn't populate the cache.
     */
    uint8_t* writableData() {
      dcheckIntegrity();
      return data_.cachedRange.first;
    }

    /**
     * Return the length of the cached writable tail.
     *
     * Note: doesn't populate the cache.
     */
    size_t length() {
      dcheckIntegrity();
      return data_.cachedRange.second - data_.cachedRange.first;
    }

    /**
     * Mark n bytes as occupied (e.g. postallocate).
     */
    void append(size_t n) {
      dcheckIntegrity();
      // This can happen only if somebody is misusing the interface.
      // E.g. calling append after touching IOBufQueue or without checking
      // the length().
      if (LIKELY(data_.cachedRange.first != nullptr)) {
        DCHECK_LE(n, length());
        data_.cachedRange.first += n;
      } else {
        appendSlow(n);
      }
    }

    /**
     * Same as append(n), but avoids checking if there is a cache.
     * The caller must guarantee that the cache is set (e.g. the caller just
     * called fillCache or checked that it's not empty).
     */
    void appendUnsafe(size_t n) {
      data_.cachedRange.first += n;
    }

    /**
     * Fill the cache of writable tail from the underlying IOBufQueue.
     */
    void fillCache() {
      queue_->fillWritableRangeCache(data_);
    }

   private:
    WritableRangeCacheData data_;
    IOBufQueue* queue_;

    FOLLY_NOINLINE void appendSlow(size_t n) {
      queue_->postallocate(n);
    }

    void dcheckIntegrity() {
      // Tail start should always be less than tail end.
      DCHECK_LE(
          (void*)data_.cachedRange.first, (void*)data_.cachedRange.second);
      DCHECK(
          data_.cachedRange.first != nullptr ||
          data_.cachedRange.second == nullptr);

      // Cached range should be always empty if the cache is not attached.
      DCHECK(
          data_.attached ||
          (data_.cachedRange.first == nullptr &&
           data_.cachedRange.second == nullptr));

      // We cannot be in attached state if the queue_ is not set.
      DCHECK(queue_ != nullptr || !data_.attached);

      // If we're attached and the cache is not empty, then it should coincide
      // with the tail buffer.
      DCHECK(
          !data_.attached || data_.cachedRange.first == nullptr ||
          (queue_->head_ != nullptr &&
           data_.cachedRange.first >= queue_->head_->prev()->writableTail() &&
           data_.cachedRange.second ==
               queue_->head_->prev()->writableTail() +
                   queue_->head_->prev()->tailroom()));
    }
  };
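
  /**
   * Illustrative usage sketch: writing into the cached tail directly instead
   * of paying for a preallocate()/postallocate() pair per small write.
   * Assumes the queue already has a writable tail (ensured here via
   * preallocate()).
   *
   *   folly::IOBufQueue queue(folly::IOBufQueue::cacheChainLength());
   *   queue.preallocate(1024, 4096); // make sure there is a writable tail
   *   folly::IOBufQueue::WritableRangeCache cache(&queue);
   *   if (cache.length() >= 4) {
   *     memcpy(cache.writableData(), "abcd", 4);
   *     cache.append(4); // mark the 4 bytes as used
   *   }
   */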

  explicit IOBufQueue(const Options& options = Options());
  ~IOBufQueue();

  /**
   * Return a space to prepend bytes and the amount of headroom available.
   */
  std::pair<void*, std::size_t> headroom();

  /**
   * Indicate that n bytes from the headroom have been used.
   */
  void markPrepended(std::size_t n);

  /**
   * Prepend an existing range; throws std::overflow_error if not enough
   * room.
   */
  void prepend(const void* buf, std::size_t n);
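
  /**
   * Illustrative usage sketch: prepending a length prefix once the payload is
   * complete, a common wire-protocol pattern. Given an IOBufQueue `queue`,
   * assumes its first buffer has enough headroom and that it was constructed
   * with cacheChainLength().
   *
   *   uint32_t len = htonl(static_cast<uint32_t>(queue.chainLength()));
   *   queue.prepend(&len, sizeof(len)); // throws std::overflow_error if no room
   */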

  /**
   * Add a buffer or buffer chain to the end of this queue. The
   * queue takes ownership of buf.
   *
   * If pack is true, we try to reduce wastage at the end of this queue
   * by copying some data from the first buffers in the buf chain (and
   * releasing the buffers), if possible.  If pack is false, we leave
   * the chain topology unchanged.
   */
  void append(std::unique_ptr<folly::IOBuf>&& buf, bool pack = false);

  /**
   * Add a queue to the end of this queue. The queue takes ownership of
   * all buffers from the other queue.
   */
  void append(IOBufQueue& other, bool pack = false);
  void append(IOBufQueue&& other, bool pack = false) {
    append(other, pack); // call lvalue reference overload, above
  }

  /**
   * Copy len bytes, starting at buf, to the end of this queue.
   * The caller retains ownership of the source data.
   */
  void append(const void* buf, size_t len);

  /**
   * Copy a string to the end of this queue.
   * The caller retains ownership of the source data.
   */
  void append(StringPiece sp) {
    append(sp.data(), sp.size());
  }
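
  /**
   * Illustrative usage sketch: the different append() overloads side by side.
   *
   *   folly::IOBufQueue queue(folly::IOBufQueue::cacheChainLength());
   *   queue.append("GET / HTTP/1.1\r\n");  // copies via the StringPiece overload
   *   queue.append(folly::IOBuf::copyBuffer("Host: example.com\r\n")); // takes ownership
   *   folly::IOBufQueue trailer;
   *   trailer.append("\r\n", 2);
   *   queue.append(std::move(trailer), true); // pack = true tries to reduce wastage
   */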

  /**
   * Append a chain of IOBuf objects that point to consecutive regions
   * within buf.
   *
   * Just like IOBuf::wrapBuffer, this should only be used when the caller
   * knows ahead of time and can ensure that all IOBuf objects that will point
   * to this buffer will be destroyed before the buffer itself is destroyed;
   * all other caveats from wrapBuffer also apply.
   *
   * Every buffer except for the last will wrap exactly blockSize bytes.
   * Importantly, this method may be used to wrap buffers larger than 4GB.
   */
  void wrapBuffer(
      const void* buf,
      size_t len,
      std::size_t blockSize = (1U << 31)); // default block size: 2GB
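
  /**
   * Illustrative usage sketch: wrapping an externally owned buffer without
   * copying it. The caller must keep `data` alive until every IOBuf that
   * refers to it has been destroyed.
   *
   *   std::vector<uint8_t> data(1 << 20);
   *   folly::IOBufQueue queue;
   *   queue.wrapBuffer(data.data(), data.size()); // zero-copy, default blockSize
   */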

  /**
   * Obtain a writable block of contiguous bytes at the end of this
   * queue, allocating more space if necessary.  The amount of space
   * reserved will be at least min.  If min contiguous space is not
   * available at the end of the queue, an IOBuf with size newAllocationSize
   * is appended to the chain and returned.  The actual available space
   * may be larger than newAllocationSize, but will be truncated to max,
   * if specified.
   *
   * If the caller subsequently writes anything into the returned space,
   * it must call the postallocate() method.
   *
   * @return The starting address of the block and the length in bytes.
   *
   * @note The point of the preallocate()/postallocate() mechanism is
   *       to support I/O APIs such as Thrift's TAsyncSocket::ReadCallback
   *       that request a buffer from the application and then, in a later
   *       callback, tell the application how much of the buffer they've
   *       filled with data.
   */
  std::pair<void*, std::size_t> preallocate(
      std::size_t min,
      std::size_t newAllocationSize,
      std::size_t max = std::numeric_limits<std::size_t>::max()) {
    dcheckCacheIntegrity();

    if (LIKELY(writableTail() != nullptr && tailroom() >= min)) {
      return std::make_pair(
          writableTail(), std::min<std::size_t>(max, tailroom()));
    }

    return preallocateSlow(min, newAllocationSize, max);
  }

  /**
   * Tell the queue that the caller has written data into the first n
   * bytes provided by the previous preallocate() call.
   *
   * @note n should be less than or equal to the size returned by
   *       preallocate().  If n is zero, the caller may skip the call
   *       to postallocate().  If n is nonzero, the caller must not
   *       invoke any other non-const methods on this IOBufQueue between
   *       the call to preallocate and the call to postallocate().
   */
  void postallocate(std::size_t n) {
    dcheckCacheIntegrity();
    DCHECK_LE(
        (void*)(cachePtr_->cachedRange.first + n),
        (void*)cachePtr_->cachedRange.second);
    cachePtr_->cachedRange.first += n;
  }
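
  /**
   * Illustrative usage sketch: a read loop built on the
   * preallocate()/postallocate() pair, using POSIX read() as the producer.
   *
   *   void readOnce(int fd, folly::IOBufQueue& queue) {
   *     auto space = queue.preallocate(2000, 4000); // at least 2000 writable bytes
   *     ssize_t n = ::read(fd, space.first, space.second);
   *     if (n > 0) {
   *       queue.postallocate(size_t(n)); // commit only the bytes actually read
   *     }
   *   }
   */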

  /**
   * Obtain a writable block of n contiguous bytes, allocating more space
   * if necessary, and mark it as used.  The caller can fill it later.
   */
  void* allocate(std::size_t n) {
    void* p = preallocate(n, n).first;
    postallocate(n);
    return p;
  }

  void* writableTail() const {
    dcheckCacheIntegrity();
    return cachePtr_->cachedRange.first;
  }

  size_t tailroom() const {
    dcheckCacheIntegrity();
    return cachePtr_->cachedRange.second - cachePtr_->cachedRange.first;
  }

  /**
   * Split off the first n bytes of the queue into a separate IOBuf chain,
   * and transfer ownership of the new chain to the caller.  The IOBufQueue
   * retains ownership of everything after the split point.
   *
   * @warning If the split point lies in the middle of some IOBuf within
   *          the chain, this function may, as an implementation detail,
   *          clone that IOBuf.
   *
   * @throws std::underflow_error if n exceeds the number of bytes
   *         in the queue.
   */
  std::unique_ptr<folly::IOBuf> split(size_t n) {
    return split(n, true);
  }

  /**
   * Similar to split, but will return the entire queue instead of throwing
   * if n exceeds the number of bytes in the queue.
   */
  std::unique_ptr<folly::IOBuf> splitAtMost(size_t n) {
    return split(n, false);
  }
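
  /**
   * Illustrative usage sketch: peeling a fixed-size frame off the front of
   * the queue once enough bytes have arrived. Assumes the queue was
   * constructed with cacheChainLength().
   *
   *   std::unique_ptr<folly::IOBuf> tryReadFrame(
   *       folly::IOBufQueue& queue, size_t frameSize) {
   *     if (queue.chainLength() < frameSize) {
   *       return nullptr; // not enough data buffered yet
   *     }
   *     return queue.split(frameSize); // caller now owns the frame's chain
   *   }
   */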

  /**
   * Similar to IOBuf::trimStart, but works on the whole queue.  Will
   * pop off buffers that have been completely trimmed.
   */
  void trimStart(size_t amount);

  /**
   * Similar to trimStart, but will trim at most amount bytes and returns
   * the number of bytes trimmed.
   */
  size_t trimStartAtMost(size_t amount);

  /**
   * Similar to IOBuf::trimEnd, but works on the whole queue.  Will
   * pop off buffers that have been completely trimmed.
   */
  void trimEnd(size_t amount);

  /**
   * Similar to trimEnd, but will trim at most amount bytes and returns
   * the number of bytes trimmed.
   */
  size_t trimEndAtMost(size_t amount);
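
  /**
   * Illustrative usage sketch: discarding an already-parsed header in place,
   * without copying the rest of the queue. Given an IOBufQueue `queue`;
   * kHeaderLen is a hypothetical constant.
   *
   *   queue.trimStart(kHeaderLen);            // throws if fewer bytes are queued
   *   size_t n = queue.trimStartAtMost(16);   // trims up to 16 bytes, never throws
   */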

  /**
   * Transfer ownership of the queue's entire IOBuf chain to the caller.
   */
  std::unique_ptr<folly::IOBuf> move() {
    auto guard = updateGuard();
    std::unique_ptr<folly::IOBuf> res = std::move(head_);
    chainLength_ = 0;
    return res;
  }
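
  /**
   * Illustrative usage sketch: handing the accumulated chain to a consumer.
   * Given an IOBufQueue `queue`; writeChain() is a hypothetical sink.
   *
   *   if (!queue.empty()) {
   *     writeChain(queue.move()); // queue is now empty and reusable
   *   }
   */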

  /**
   * Access the front IOBuf.
   *
   * Note: caller will see the current state of the chain, but may not see
   *       future updates immediately, due to the presence of a tail cache.
   * Note: the caller may potentially clone the chain, thus marking all buffers
   *       as shared. We may still continue writing to the tail of the last
   *       IOBuf without checking if it's shared, but this is fine, since the
   *       cloned IOBufs won't reference that data.
   */
  const folly::IOBuf* front() const {
    flushCache();
    return head_.get();
  }

  /**
   * Returns the first IOBuf in the chain and removes it from the chain.
   *
   * @return first IOBuf in the chain or nullptr if none.
   */
  std::unique_ptr<folly::IOBuf> pop_front();

  /**
   * Total chain length, only valid if cacheChainLength was specified in the
   * constructor.
   */
  size_t chainLength() const {
    if (UNLIKELY(!options_.cacheChainLength)) {
      throw std::invalid_argument("IOBufQueue: chain length not cached");
    }
    dcheckCacheIntegrity();
    return chainLength_ + (cachePtr_->cachedRange.first - tailStart_);
  }
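
  /**
   * Illustrative usage sketch: draining the queue one buffer at a time.
   * Given an IOBufQueue `queue`; consume() is a hypothetical sink.
   *
   *   while (auto buf = queue.pop_front()) {
   *     consume(buf->data(), buf->length()); // each buf is a contiguous region
   *   }
   */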

  /**
   * Returns true iff the IOBuf chain length is 0.
   */
  bool empty() const {
    dcheckCacheIntegrity();
    return !head_ ||
        (head_->empty() && cachePtr_->cachedRange.first == tailStart_);
  }

  const Options& options() const {
    return options_;
  }

  /**
   * Clear the queue.  Note that this does not release the buffers, it
   * just sets their length to zero; useful if you want to reuse the
   * same queue without reallocating.
   */
  void clear();

  /**
   * Append the queue to a std::string. Non-destructive.
   */
  void appendToString(std::string& out) const;

  /**
   * Calls IOBuf::gather() on the head of the queue, if it exists.
   */
  void gather(std::size_t maxLength);

  /** Movable */
  IOBufQueue(IOBufQueue&&) noexcept;
  IOBufQueue& operator=(IOBufQueue&&);

 private:
  std::unique_ptr<folly::IOBuf> split(size_t n, bool throwOnUnderflow);

  static const size_t kChainLengthNotCached = (size_t)-1;
  /** Not copyable */
  IOBufQueue(const IOBufQueue&) = delete;
  IOBufQueue& operator=(const IOBufQueue&) = delete;

  Options options_;

  // NOTE that chainLength_ is still updated even if !options_.cacheChainLength
  // because doing it unchecked in postallocate() is faster (no (mis)predicted
  // branch)
  mutable size_t chainLength_{0};
  /**
   * Everything that has been appended but not yet discarded or moved out.
   * Note: anything that needs to operate on a tail should either call
   * flushCache() or grab updateGuard() (it will flush the cache itself).
   */
  std::unique_ptr<folly::IOBuf> head_;

  mutable uint8_t* tailStart_{nullptr};
  WritableRangeCacheData* cachePtr_{nullptr};
  WritableRangeCacheData localCache_;

  void dcheckCacheIntegrity() const {
    // Tail start should always be less than tail end.
    DCHECK_LE((void*)tailStart_, (void*)cachePtr_->cachedRange.first);
    DCHECK_LE(
        (void*)cachePtr_->cachedRange.first,
        (void*)cachePtr_->cachedRange.second);
    DCHECK(
        cachePtr_->cachedRange.first != nullptr ||
        cachePtr_->cachedRange.second == nullptr);

    // There is always an attached cache instance.
    DCHECK(cachePtr_->attached);

    // Either cache is empty or it coincides with the tail.
    DCHECK(
        cachePtr_->cachedRange.first == nullptr ||
        (head_ != nullptr && tailStart_ == head_->prev()->writableTail() &&
         tailStart_ <= cachePtr_->cachedRange.first &&
         cachePtr_->cachedRange.first >= head_->prev()->writableTail() &&
         cachePtr_->cachedRange.second ==
             head_->prev()->writableTail() + head_->prev()->tailroom()));
  }

  /**
   * Populate dest with writable tail range cache.
   */
  void fillWritableRangeCache(WritableRangeCacheData& dest) {
    dcheckCacheIntegrity();
    if (cachePtr_ != &dest) {
      dest = std::move(*cachePtr_);
      cachePtr_ = &dest;
    }
  }

  /**
   * Clear the current writable tail cache and reset it to localCache_.
   */
  void clearWritableRangeCache() {
    flushCache();

    if (cachePtr_ != &localCache_) {
      localCache_ = std::move(*cachePtr_);
      cachePtr_ = &localCache_;
    }

    DCHECK(cachePtr_ == &localCache_ && localCache_.attached);
  }

  /**
   * Commit any pending changes to the tail of the queue.
   */
  void flushCache() const {
    dcheckCacheIntegrity();

    if (tailStart_ != cachePtr_->cachedRange.first) {
      auto buf = head_->prev();
      DCHECK_EQ(
          (void*)(buf->writableTail() + buf->tailroom()),
          (void*)cachePtr_->cachedRange.second);
      auto len = cachePtr_->cachedRange.first - tailStart_;
      buf->append(len);
      chainLength_ += len;
      tailStart_ += len;
    }
  }

  // For WritableRangeCache move assignment/construction.
  void updateCacheRef(WritableRangeCacheData& newRef) {
    cachePtr_ = &newRef;
  }

  /**
   * Update the cached writable tail range. Called by updateGuard().
   */
  void updateWritableTailCache() {
    if (LIKELY(head_ != nullptr)) {
      IOBuf* buf = head_->prev();
      if (LIKELY(!buf->isSharedOne())) {
        tailStart_ = buf->writableTail();
        cachePtr_->cachedRange = std::pair<uint8_t*, uint8_t*>(
            tailStart_, tailStart_ + buf->tailroom());
        return;
      }
    }
    tailStart_ = nullptr;
    cachePtr_->cachedRange = std::pair<uint8_t*, uint8_t*>();
  }

  std::pair<void*, std::size_t> preallocateSlow(
      std::size_t min,
      std::size_t newAllocationSize,
      std::size_t max);
};

} // namespace folly