[master] 42959df keep a reserve of idle threads for vital tasks
Nils Goroll
nils.goroll at uplex.de
Tue Nov 1 10:14:05 CET 2016
commit 42959df8c23914e7124ad888e5de7d061bbc72e5
Author: Nils Goroll <nils.goroll at uplex.de>
Date: Thu Oct 27 09:33:02 2016 +0200
keep a reserve of idle threads for vital tasks
(backend tasks for the time being)
Add parameter thread_pool_reserve to tweak the size of the reserve.
diff --git a/bin/varnishd/cache/cache.h b/bin/varnishd/cache/cache.h
index 4b3604e..0d3543a 100644
--- a/bin/varnishd/cache/cache.h
+++ b/bin/varnishd/cache/cache.h
@@ -303,9 +303,14 @@ struct pool_task {
void *priv;
};
-/* tasks are taken off the queues in this order */
+/*
+ * tasks are taken off the queues in this order
+ *
+ * prios up to TASK_QUEUE_RESERVE are run from the reserve
+ */
enum task_prio {
TASK_QUEUE_BO,
+#define TASK_QUEUE_RESERVE TASK_QUEUE_BO
TASK_QUEUE_REQ,
TASK_QUEUE_VCA,
TASK_QUEUE_END
@@ -920,7 +925,7 @@ const char *sess_close_2str(enum sess_close sc, int want_desc);
/* cache_pool.c */
int Pool_Task(struct pool *pp, struct pool_task *task, enum task_prio how);
-int Pool_Task_Arg(struct worker *, task_func_t *,
+int Pool_Task_Arg(struct worker *, enum task_prio, task_func_t *,
const void *arg, size_t arg_len);
void Pool_Sumstat(struct worker *w);
int Pool_TrySumstat(struct worker *wrk);
diff --git a/bin/varnishd/cache/cache_acceptor.c b/bin/varnishd/cache/cache_acceptor.c
index 4b58fd7..9d69daa 100644
--- a/bin/varnishd/cache/cache_acceptor.c
+++ b/bin/varnishd/cache/cache_acceptor.c
@@ -447,7 +447,8 @@ vca_accept_task(struct worker *wrk, void *arg)
wa.acceptsock = i;
- if (!Pool_Task_Arg(wrk, vca_make_session, &wa, sizeof wa)) {
+ if (!Pool_Task_Arg(wrk, TASK_QUEUE_VCA,
+ vca_make_session, &wa, sizeof wa)) {
/*
* We couldn't get another thread, so we will handle
* the request in this worker thread, but first we
diff --git a/bin/varnishd/cache/cache_pool.h b/bin/varnishd/cache/cache_pool.h
index c400a31..fa3663a 100644
--- a/bin/varnishd/cache/cache_pool.h
+++ b/bin/varnishd/cache/cache_pool.h
@@ -40,6 +40,7 @@ struct pool {
pthread_t herder_thr;
struct lock mtx;
+ unsigned nidle;
struct taskhead idle_queue;
struct taskhead queues[TASK_QUEUE_END];
unsigned nthr;
diff --git a/bin/varnishd/cache/cache_wrk.c b/bin/varnishd/cache/cache_wrk.c
index 1db93a8..1fca8da 100644
--- a/bin/varnishd/cache/cache_wrk.c
+++ b/bin/varnishd/cache/cache_wrk.c
@@ -148,17 +148,35 @@ pool_addstat(struct dstat *dst, struct dstat *src)
memset(src, 0, sizeof *src);
}
+static inline int
+pool_reserve(void)
+{
+ unsigned lim;
+
+ if (cache_param->wthread_reserve == 0)
+ return (cache_param->wthread_min / 20 + 1);
+ lim = cache_param->wthread_min * 950 / 1000;
+ if (cache_param->wthread_reserve > lim)
+ return (lim);
+ return (cache_param->wthread_reserve);
+}
+
/*--------------------------------------------------------------------*/
static struct worker *
-pool_getidleworker(struct pool *pp)
+pool_getidleworker(struct pool *pp, enum task_prio how)
{
- struct pool_task *pt;
+ struct pool_task *pt = NULL;
struct worker *wrk;
CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);
Lck_AssertHeld(&pp->mtx);
- pt = VTAILQ_FIRST(&pp->idle_queue);
+ if (how <= TASK_QUEUE_RESERVE || pp->nidle > pool_reserve()) {
+ pt = VTAILQ_FIRST(&pp->idle_queue);
+ if (pt == NULL)
+ AZ(pp->nidle);
+ }
+
if (pt == NULL) {
if (pp->nthr < cache_param->wthread_max) {
pp->dry++;
@@ -179,7 +197,7 @@ pool_getidleworker(struct pool *pp)
*/
int
-Pool_Task_Arg(struct worker *wrk, task_func_t *func,
+Pool_Task_Arg(struct worker *wrk, enum task_prio how, task_func_t *func,
const void *arg, size_t arg_len)
{
struct pool *pp;
@@ -193,9 +211,11 @@ Pool_Task_Arg(struct worker *wrk, task_func_t *func,
CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);
Lck_Lock(&pp->mtx);
- wrk2 = pool_getidleworker(pp);
+ wrk2 = pool_getidleworker(pp, how);
if (wrk2 != NULL) {
+ AN(pp->nidle);
VTAILQ_REMOVE(&pp->idle_queue, &wrk2->task, list);
+ pp->nidle--;
retval = 1;
} else {
wrk2 = wrk;
@@ -222,7 +242,6 @@ Pool_Task(struct pool *pp, struct pool_task *task, enum task_prio how)
{
struct worker *wrk;
int retval = 0;
-
CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);
AN(task);
AN(task->func);
@@ -232,9 +251,11 @@ Pool_Task(struct pool *pp, struct pool_task *task, enum task_prio how)
/* The common case first: Take an idle thread, do it. */
- wrk = pool_getidleworker(pp);
+ wrk = pool_getidleworker(pp, how);
if (wrk != NULL) {
+ AN(pp->nidle);
VTAILQ_REMOVE(&pp->idle_queue, &wrk->task, list);
+ pp->nidle--;
AZ(wrk->task.func);
wrk->task.func = task->func;
wrk->task.priv = task->priv;
@@ -282,7 +303,7 @@ Pool_Work_Thread(struct pool *pp, struct worker *wrk)
{
struct pool_task *tp;
struct pool_task tpx, tps;
- int i;
+ int i, prio_lim;
CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);
wrk->pool = pp;
@@ -294,7 +315,12 @@ Pool_Work_Thread(struct pool *pp, struct worker *wrk)
WS_Reset(wrk->aws, NULL);
AZ(wrk->vsl);
- for (i = 0; i < TASK_QUEUE_END; i++) {
+ if (pp->nidle < pool_reserve())
+ prio_lim = TASK_QUEUE_RESERVE + 1;
+ else
+ prio_lim = TASK_QUEUE_END;
+
+ for (i = 0; i < prio_lim; i++) {
tp = VTAILQ_FIRST(&pp->queues[i]);
if (tp != NULL) {
pp->lqueue--;
@@ -323,6 +349,7 @@ Pool_Work_Thread(struct pool *pp, struct worker *wrk)
wrk->task.func = NULL;
wrk->task.priv = wrk;
VTAILQ_INSERT_HEAD(&pp->idle_queue, &wrk->task, list);
+ pp->nidle++;
do {
i = Lck_CondWait(&wrk->cond, &pp->mtx,
wrk->vcl == NULL ? 0 : wrk->lastused+60.);
@@ -470,6 +497,7 @@ pool_herder(void *priv)
wrk = NULL;
pt = VTAILQ_LAST(&pp->idle_queue, taskhead);
if (pt != NULL) {
+ AN(pp->nidle);
AZ(pt->func);
CAST_OBJ_NOTNULL(wrk, pt->priv, WORKER_MAGIC);
@@ -478,6 +506,7 @@ pool_herder(void *priv)
/* Give it a kiss on the cheek... */
VTAILQ_REMOVE(&pp->idle_queue,
&wrk->task, list);
+ pp->nidle--;
wrk->task.func = pool_kiss_of_death;
AZ(pthread_cond_signal(&wrk->cond));
} else {
diff --git a/bin/varnishd/common/params.h b/bin/varnishd/common/params.h
index 678ed26..9eabc2c 100644
--- a/bin/varnishd/common/params.h
+++ b/bin/varnishd/common/params.h
@@ -91,6 +91,7 @@ struct params {
/* Worker threads and pool */
unsigned wthread_min;
unsigned wthread_max;
+ unsigned wthread_reserve;
double wthread_timeout;
unsigned wthread_pools;
double wthread_add_delay;
diff --git a/bin/varnishd/mgt/mgt_pool.c b/bin/varnishd/mgt/mgt_pool.c
index 8791a44..03e98fd 100644
--- a/bin/varnishd/mgt/mgt_pool.c
+++ b/bin/varnishd/mgt/mgt_pool.c
@@ -62,6 +62,8 @@ tweak_thread_pool_min(struct vsb *vsb, const struct parspec *par,
return (-1);
MCF_ParamConf(MCF_MINIMUM, "thread_pool_max",
"%u", mgt_param.wthread_min);
+ MCF_ParamConf(MCF_MAXIMUM, "thread_pool_reserve",
+ "%u", mgt_param.wthread_min * 950 / 1000);
return (0);
}
@@ -116,6 +118,25 @@ struct parspec WRK_parspec[] = {
"Minimum is 10 threads.",
DELAYED_EFFECT,
"100", "threads" },
+ { "thread_pool_reserve", tweak_uint, &mgt_param.wthread_reserve,
+ 0, NULL,
+ "The number of worker threads reserved for vital tasks "
+ "in each pool.\n"
+ "\n"
+ "Tasks may require other tasks to complete (for example, "
+ "client requests may require backend requests). This reserve "
+ "is to ensure that such tasks still get to run even under high "
+ "load.\n"
+ "\n"
+ "Increasing the reserve may help setups with a high number of "
+ "backend requests at the expense of client performance. "
+ "Setting it too high will waste resources by keeping threads "
+ "unused.\n"
+ "\n"
+ "Default is 0 to auto-tune (currently 5% of thread_pool_min).\n"
+ "Minimum is 1 otherwise, maximum is 95% of thread_pool_min.",
+ DELAYED_EFFECT,
+ "0", "threads" },
{ "thread_pool_timeout",
tweak_timeout, &mgt_param.wthread_timeout,
"10", NULL,
diff --git a/bin/varnishtest/tests/c00079.vtc b/bin/varnishtest/tests/c00079.vtc
new file mode 100644
index 0000000..a0d4c2e
--- /dev/null
+++ b/bin/varnishtest/tests/c00079.vtc
@@ -0,0 +1,23 @@
+varnishtest "thread_pool_reserve max adjustment"
+
+server s1 {
+} -start
+
+varnish v1 \
+ -arg "-p thread_pool_min=10" \
+ -arg "-p thread_pool_max=100" \
+ -arg "-p thread_pools=1" \
+ -arg "-p thread_pool_timeout=10" \
+ -vcl+backend {}
+varnish v1 -start
+
+varnish v1 -cliok "param.set thread_pool_reserve 0"
+varnish v1 -cliok "param.set thread_pool_reserve 1"
+varnish v1 -cliok "param.set thread_pool_reserve 9"
+varnish v1 -clierr 106 "param.set thread_pool_reserve 10"
+
+varnish v1 -cliok "param.set thread_pool_min 100"
+varnish v1 -cliok "param.set thread_pool_reserve 0"
+varnish v1 -cliok "param.set thread_pool_reserve 1"
+varnish v1 -cliok "param.set thread_pool_reserve 95"
+varnish v1 -clierr 106 "param.set thread_pool_reserve 96"
diff --git a/include/tbl/params.h b/include/tbl/params.h
index 1e01384..0250dc2 100644
--- a/include/tbl/params.h
+++ b/include/tbl/params.h
@@ -1142,6 +1142,34 @@ PARAM(
/* l-text */ "",
/* func */ NULL
)
+/* actual location mgt_pool.c */
+PARAM(
+ /* name */ thread_pool_reserve,
+ /* typ */ thread_pool_reserve,
+ /* min */ NULL,
+ /* max */ NULL,
+ /* default */ "0",
+ /* units */ "threads",
+ /* flags */ DELAYED_EFFECT| EXPERIMENTAL,
+ /* s-text */
+ "The number of worker threads reserved for vital tasks "
+ "in each pool.\n"
+ "\n"
+ "Tasks may require other tasks to complete (for example, "
+ "client requests may require backend requests). This reserve "
+ "is to ensure that such tasks still get to run even under high "
+ "load.\n"
+ "\n"
+ "Increasing the reserve may help setups with a high number of "
+ "backend requests at the expense of client performance. "
+ "Setting it too high will waste resources by keeping threads "
+ "unused.\n"
+ "\n"
+ "Default is 0 to auto-tune (currently 5% of thread_pool_min).\n"
+ "Minimum is 1 otherwise, maximum is 95% of thread_pool_min.",
+ /* l-text */ "",
+ /* func */ NULL
+)
/* actual location mgt_pool.c */
PARAM(
More information about the varnish-commit
mailing list