[tor-commits] [tor] 72/77: hs_pow: modified approach to pqueue level thresholds
gitolite role
git at cupani.torproject.org
Wed May 10 15:47:56 UTC 2023
This is an automated email from the git hooks/post-receive script.
dgoulet pushed a commit to branch main
in repository tor.
commit 60231536315517b4133cbe80d430b8133dd42c55
Author: Micah Elizabeth Scott <beth at torproject.org>
AuthorDate: Mon Apr 10 15:27:33 2023 -0700
hs_pow: modified approach to pqueue level thresholds
This centralizes the logic for deciding on these magic thresholds,
and tries to reduce them to just two: a min and max. The min should be a
"nearly empty" threshold, indicating that the queue only contains work
we expect to be able to complete very soon. The max level triggers a
bulk culling process that reduces the queue to half that amount.
This patch calculates both thresholds based on the torrc pqueue rate
settings if they're present, and uses generic defaults if the user asked
for an unlimited dequeue rate in torrc.
Signed-off-by: Micah Elizabeth Scott <beth at torproject.org>
---
src/feature/hs/hs_circuit.c | 31 +++++--------------------------
src/feature/hs/hs_pow.h | 13 ++++++++++---
src/feature/hs/hs_service.c | 14 +++++++++++++-
3 files changed, 28 insertions(+), 30 deletions(-)
diff --git a/src/feature/hs/hs_circuit.c b/src/feature/hs/hs_circuit.c
index 1287b2beda..c3f2fbfc1e 100644
--- a/src/feature/hs/hs_circuit.c
+++ b/src/feature/hs/hs_circuit.c
@@ -650,14 +650,8 @@ queued_rend_request_is_too_old(pending_rend_t *req, time_t now)
return 0;
}
-/** Maximum number of rendezvous requests we enqueue per service. We allow the
- * average amount of INTRODUCE2 that a service can process in a second times
- * the rendezvous timeout. Then we let it grow to twice that before
- * discarding the bottom half in trim_rend_pqueue(). */
-#define QUEUED_REND_REQUEST_HIGH_WATER (2 * 180 * MAX_REND_TIMEOUT)
-
/** Our rendezvous request priority queue is too full; keep the first
- * QUEUED_REND_REQUEST_HIGH_WATER/2 entries and discard the rest.
+ * pqueue_high_level/2 entries and discard the rest.
*/
static void
trim_rend_pqueue(hs_pow_service_state_t *pow_state, time_t now)
@@ -670,7 +664,7 @@ trim_rend_pqueue(hs_pow_service_state_t *pow_state, time_t now)
smartlist_len(old_pqueue));
while (smartlist_len(old_pqueue) &&
- smartlist_len(new_pqueue) < QUEUED_REND_REQUEST_HIGH_WATER/2) {
+ smartlist_len(new_pqueue) < pow_state->pqueue_high_level/2) {
/* while there are still old ones, and the new one isn't full yet */
pending_rend_t *req =
smartlist_pqueue_pop(old_pqueue,
@@ -752,12 +746,6 @@ rend_pqueue_clear(hs_pow_service_state_t *pow_state)
}
}
-/** How many rendezvous request we handle per mainloop event. Per prop327,
- * handling an INTRODUCE2 cell takes on average 5.56msec on an average CPU and
- * so it means that launching this max amount of circuits is well below 0.08
- * seconds which we believe is negligable on the whole mainloop. */
-#define MAX_REND_REQUEST_PER_MAINLOOP 16
-
/** What is the threshold of in-progress (CIRCUIT_PURPOSE_S_CONNECT_REND)
* rendezvous responses above which we won't launch new low-effort rendezvous
* responses? (Intro2 cells with suitable PoW effort are not affected
@@ -767,7 +755,6 @@ rend_pqueue_clear(hs_pow_service_state_t *pow_state)
static void
handle_rend_pqueue_cb(mainloop_event_t *ev, void *arg)
{
- int count = 0;
hs_service_t *service = arg;
hs_pow_service_state_t *pow_state = service->state.pow_state;
time_t now = time(NULL);
@@ -845,10 +832,6 @@ handle_rend_pqueue_cb(mainloop_event_t *ev, void *arg)
token_bucket_ctr_get(&pow_state->pqueue_bucket) < 1) {
break;
}
-
- if (++count == MAX_REND_REQUEST_PER_MAINLOOP) {
- break;
- }
}
/* If there are still some pending rendezvous circuits in the pqueue then
@@ -856,12 +839,8 @@ handle_rend_pqueue_cb(mainloop_event_t *ev, void *arg)
if (smartlist_len(pow_state->rend_request_pqueue) > 0) {
mainloop_event_activate(pow_state->pop_pqueue_ev);
- // XXX: Is this a good threshhold to decide that we have a significant
- // queue? I just made it up.
- if (smartlist_len(pow_state->rend_request_pqueue) >
- 2*MAX_REND_REQUEST_PER_MAINLOOP) {
- /* Note the fact that we had multiple eventloops worth of queue
- * to service, for effort estimation */
+ if (smartlist_len(pow_state->rend_request_pqueue) >=
+ pow_state->pqueue_low_level) {
pow_state->had_queue = 1;
}
}
@@ -922,7 +901,7 @@ enqueue_rend_request(const hs_service_t *service, hs_service_intro_point_t *ip,
/* See if there are so many cells queued that we need to cull. */
if (smartlist_len(pow_state->rend_request_pqueue) >=
- QUEUED_REND_REQUEST_HIGH_WATER) {
+ pow_state->pqueue_high_level) {
trim_rend_pqueue(pow_state, now);
hs_metrics_pow_pqueue_rdv(service,
smartlist_len(pow_state->rend_request_pqueue));
diff --git a/src/feature/hs/hs_pow.h b/src/feature/hs/hs_pow.h
index 357f527c34..23c05419a6 100644
--- a/src/feature/hs/hs_pow.h
+++ b/src/feature/hs/hs_pow.h
@@ -74,9 +74,16 @@ typedef struct hs_pow_service_state_t {
* based on the amount of effort that was exerted in the PoW. */
smartlist_t *rend_request_pqueue;
- /* HRPR TODO Is this cursed? Including compat_libevent for this. feb 24 */
- /* When PoW defenses are enabled, this event pops rendezvous requests from
- * the service's priority queue; higher effort is higher priority. */
+  /* Low level mark for pqueue size. Below this length, the queue is
+   * considered to be effectively empty when calculating effort adjustments. */
+ int pqueue_low_level;
+
+  /* High level mark for pqueue size. When the queue reaches this length,
+   * we trim it down to pqueue_high_level/2. */
+ int pqueue_high_level;
+
+ /* Event callback for dequeueing rend requests, paused when the queue is
+ * empty or rate limited. */
mainloop_event_t *pop_pqueue_ev;
/* Token bucket for rate limiting the priority queue */
diff --git a/src/feature/hs/hs_service.c b/src/feature/hs/hs_service.c
index 43598ad768..b2af881597 100644
--- a/src/feature/hs/hs_service.c
+++ b/src/feature/hs/hs_service.c
@@ -278,6 +278,12 @@ initialize_pow_defenses(hs_service_t *service)
pow_state->rend_request_pqueue = smartlist_new();
pow_state->pop_pqueue_ev = NULL;
+  /* If we are using the pqueue rate limiter, calculate min and max queue
+   * levels based on those programmed rates. If not, fall back to generic
+   * defaults. */
+ pow_state->pqueue_low_level = 16;
+ pow_state->pqueue_high_level = 16384;
+
if (service->config.pow_queue_rate > 0 &&
service->config.pow_queue_burst >= service->config.pow_queue_rate) {
pow_state->using_pqueue_bucket = 1;
@@ -285,6 +291,11 @@ initialize_pow_defenses(hs_service_t *service)
service->config.pow_queue_rate,
service->config.pow_queue_burst,
(uint32_t) approx_time());
+
+ pow_state->pqueue_low_level = MAX(8, service->config.pow_queue_rate / 4);
+ pow_state->pqueue_high_level =
+ service->config.pow_queue_burst +
+ service->config.pow_queue_rate * MAX_REND_TIMEOUT * 2;
}
/* We recalculate and update the suggested effort every HS_UPDATE_PERIOD
@@ -2701,7 +2712,8 @@ update_suggested_effort(hs_service_t *service, time_t now)
/* Increase when the top of queue is high-effort */
aimd_event = INCREASE;
}
- } else if (smartlist_len(pow_state->rend_request_pqueue) == 0) {
+ } else if (smartlist_len(pow_state->rend_request_pqueue) <
+ pow_state->pqueue_low_level) {
/* Dec when the queue is empty now and had_queue wasn't set this period */
aimd_event = DECREASE;
}
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.
More information about the tor-commits
mailing list