// Codel.cpp
/*
 * Copyright 2017-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <folly/executors/Codel.h>
#include <folly/portability/GFlags.h>

#include <algorithm>

// Runtime-tunable knobs (gflags):
//  - codel_interval: length of one codel measurement interval, in ms.
//  - codel_target_delay: queueing-delay target, in ms; exceeding it for a
//    whole interval marks the queue as overloaded.
DEFINE_int32(codel_interval, 100, "Codel default interval time in ms");
DEFINE_int32(codel_target_delay, 5, "Target codel queueing delay in ms");

using namespace std::chrono;
  22. namespace folly {
/// Initialize tracking state: no minimum delay observed yet, the first
/// interval anchored at "now" (stored as raw ns since the steady_clock
/// epoch), not overloaded, and codelResetDelay_ set so the first call to
/// overloaded() seeds the minimum-delay tracker with its request's delay.
Codel::Codel()
    : codelMinDelayNs_(0),
      codelIntervalTimeNs_(
          duration_cast<nanoseconds>(steady_clock::now().time_since_epoch())
              .count()),
      codelResetDelay_(true),
      overloaded_(false) {}
/// Decide whether the request with queueing delay `delay` should be shed.
///
/// Tracks the minimum observed delay within each interval. Once per
/// interval, a single winning thread rolls the interval forward and
/// recomputes overloaded_ (min delay over the last interval > target =>
/// overloaded). While overloaded, requests whose delay exceeds the slough
/// timeout (2x target) return true, i.e. "shed this request".
/// Intended to be called concurrently from many threads.
///
/// @param delay  queueing delay experienced by the current request.
/// @return true iff the queue is overloaded and this request's delay
///         exceeds getSloughTimeout().
bool Codel::overloaded(nanoseconds delay) {
  bool ret = false;
  auto now = steady_clock::now();
  // Avoid another thread updating the value at the same time we are using it
  // to calculate the overloaded state
  auto minDelay = nanoseconds(codelMinDelayNs_);
  if (now > steady_clock::time_point(nanoseconds(codelIntervalTimeNs_)) &&
      // testing before exchanging is more cacheline-friendly
      (!codelResetDelay_.load(std::memory_order_acquire) &&
       !codelResetDelay_.exchange(true))) {
    // Exactly one thread wins the exchange above; it advances the interval
    // deadline and classifies the interval that just ended.
    codelIntervalTimeNs_ =
        duration_cast<nanoseconds>((now + getInterval()).time_since_epoch())
            .count();
    if (minDelay > getTargetDelay()) {
      overloaded_ = true;
    } else {
      overloaded_ = false;
    }
  }
  // Care must be taken that only a single thread resets codelMinDelay_,
  // and that it happens after the interval reset above
  if (codelResetDelay_.load(std::memory_order_acquire) &&
      codelResetDelay_.exchange(false)) {
    codelMinDelayNs_ = delay.count();
    // More than one request must come in during an interval before codel
    // starts dropping requests
    return false;
  } else if (delay < nanoseconds(codelMinDelayNs_)) {
    codelMinDelayNs_ = delay.count();
  }
  // Here is where we apply different logic than codel proper. Instead of
  // adapting the interval until the next drop, we slough off requests with
  // queueing delay > 2*target_delay while in the overloaded regime. This
  // empirically works better for our services than the codel approach of
  // increasingly often dropping packets.
  if (overloaded_ && delay > getSloughTimeout()) {
    ret = true;
  }
  return ret;
}
  70. int Codel::getLoad() {
  71. // it might be better to use the average delay instead of minDelay, but we'd
  72. // have to track it. aspiring bootcamper?
  73. return std::min<int>(100, 100 * getMinDelay() / getSloughTimeout());
  74. }
  75. nanoseconds Codel::getMinDelay() {
  76. return nanoseconds(codelMinDelayNs_);
  77. }
  78. milliseconds Codel::getInterval() {
  79. return milliseconds(FLAGS_codel_interval);
  80. }
  81. milliseconds Codel::getTargetDelay() {
  82. return milliseconds(FLAGS_codel_target_delay);
  83. }
  84. milliseconds Codel::getSloughTimeout() {
  85. return getTargetDelay() * 2;
  86. }
  87. } // namespace folly