[master] 96f5732 Make worker threads able to call "next job".

Poul-Henning Kamp phk at FreeBSD.org
Tue Mar 17 09:24:35 CET 2015


commit 96f5732ee2944af640a4d06a3ec91652f0bb4259
Author: Poul-Henning Kamp <phk at FreeBSD.org>
Date:   Tue Mar 17 08:22:47 2015 +0000

    Make worker threads able to call "next job".
    
    Use this to save a little bit of stack on common requests.
    
    Rename pool_func_t to task_func_t.
    
    Annotate the task functions with __match_proto__.

diff --git a/bin/varnishd/cache/cache.h b/bin/varnishd/cache/cache.h
index 0406d9e..d176ef6 100644
--- a/bin/varnishd/cache/cache.h
+++ b/bin/varnishd/cache/cache.h
@@ -326,11 +326,11 @@ struct wrk_accept {
 
 /* Worker pool stuff -------------------------------------------------*/
 
-typedef void pool_func_t(struct worker *wrk, void *priv);
+typedef void task_func_t(struct worker *wrk, void *priv);
 
 struct pool_task {
 	VTAILQ_ENTRY(pool_task)		list;
-	pool_func_t			*func;
+	task_func_t			*func;
 	void				*priv;
 };
 
@@ -1012,7 +1012,7 @@ void SES_DeletePool(struct sesspool *sp);
 int SES_ScheduleReq(struct req *);
 struct req *SES_GetReq(const struct worker *, struct sess *);
 void SES_ReleaseReq(struct req *);
-pool_func_t SES_pool_accept_task;
+task_func_t SES_pool_accept_task;
 
 
 /* cache_shmlog.c */
diff --git a/bin/varnishd/cache/cache_fetch.c b/bin/varnishd/cache/cache_fetch.c
index 71770b1..38703fe 100644
--- a/bin/varnishd/cache/cache_fetch.c
+++ b/bin/varnishd/cache/cache_fetch.c
@@ -875,7 +875,7 @@ vbf_stp_done(void)
 	return (F_STP_DONE);
 }
 
-static void
+static void __match_proto__(task_func_t)
 vbf_fetch_thread(struct worker *wrk, void *priv)
 {
 	struct busyobj *bo;
diff --git a/bin/varnishd/cache/cache_pool.c b/bin/varnishd/cache/cache_pool.c
index 8160092..d1023f3 100644
--- a/bin/varnishd/cache/cache_pool.c
+++ b/bin/varnishd/cache/cache_pool.c
@@ -188,7 +188,7 @@ pool_getidleworker(struct pool *pp)
  * worker workspace.  SES_pool_accept_task() knows about this.
  */
 
-static void
+static void __match_proto__(task_func_t)
 pool_accept(struct worker *wrk, void *arg)
 {
 	struct worker *wrk2;
@@ -228,7 +228,8 @@ pool_accept(struct worker *wrk, void *arg)
 			/* No idle threads, do it ourselves */
 			Lck_Unlock(&pp->mtx);
 			AZ(Pool_Task(pp, &ps->task, POOL_QUEUE_BACK));
-			SES_pool_accept_task(wrk, pp->sesspool);
+			wrk->task.func = SES_pool_accept_task;
+			wrk->task.priv = pp->sesspool;
 			return;
 		}
 		VTAILQ_REMOVE(&pp->idle_queue, &wrk2->task, list);
@@ -310,7 +311,7 @@ Pool_Task(struct pool *pp, struct pool_task *task, enum pool_how how)
  * Empty function used as a pointer value for the thread exit condition.
  */
 
-static void
+static void __match_proto__(task_func_t)
 pool_kiss_of_death(struct worker *wrk, void *priv)
 {
 	(void)wrk;
@@ -321,7 +322,7 @@ pool_kiss_of_death(struct worker *wrk, void *priv)
  * Special function to summ stats
  */
 
-static void __match_proto__(pool_func_t)
+static void __match_proto__(task_func_t)
 pool_stat_summ(struct worker *wrk, void *priv)
 {
 	struct dstat *src;
@@ -345,7 +346,7 @@ void
 Pool_Work_Thread(struct pool *pp, struct worker *wrk)
 {
 	struct pool_task *tp;
-	struct pool_task tps;
+	struct pool_task tpx, tps;
 	int i;
 
 	CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);
@@ -394,7 +395,8 @@ Pool_Work_Thread(struct pool *pp, struct worker *wrk)
 				if (i == ETIMEDOUT)
 					VCL_Rel(&wrk->vcl);
 			} while (wrk->task.func == NULL);
-			tp = &wrk->task;
+			tpx = wrk->task;
+			tp = &tpx;
 			wrk->stats->summs++;
 		}
 		Lck_Unlock(&pp->mtx);
@@ -402,8 +404,13 @@ Pool_Work_Thread(struct pool *pp, struct worker *wrk)
 		if (tp->func == pool_kiss_of_death)
 			break;
 
-		assert(wrk->pool == pp);
-		tp->func(wrk, tp->priv);
+		do {
+			memset(&wrk->task, 0, sizeof wrk->task);
+			assert(wrk->pool == pp);
+			tp->func(wrk, tp->priv);
+			tpx = wrk->task;
+			tp = &tpx;
+		} while (tp->func != NULL);
 
 		/* cleanup for next task */
 		wrk->seen_methods = 0;
@@ -588,10 +595,9 @@ pool_mkpool(unsigned pool_no)
 	AZ(pthread_create(&pp->herder_thr, NULL, pool_herder, pp));
 
 	VTAILQ_FOREACH(ls, &heritage.socks, list) {
-		if (ls->sock < 0)
-			continue;
+		assert(ls->sock > 0);		// We know where stdin is
 		ALLOC_OBJ(ps, POOLSOCK_MAGIC);
-		XXXAN(ps);
+		AN(ps);
 		ps->lsock = ls;
 		ps->task.func = pool_accept;
 		ps->task.priv = ps;
diff --git a/bin/varnishd/cache/cache_session.c b/bin/varnishd/cache/cache_session.c
index 0326036..7542b0c 100644
--- a/bin/varnishd/cache/cache_session.c
+++ b/bin/varnishd/cache/cache_session.c
@@ -102,7 +102,7 @@ ses_new(struct sesspool *pp)
  * Process new/existing request on this session.
  */
 
-static void
+static void __match_proto__(task_func_t)
 ses_req_pool_task(struct worker *wrk, void *arg)
 {
 	struct req *req;
@@ -125,7 +125,7 @@ ses_req_pool_task(struct worker *wrk, void *arg)
  * Allocate a request + vxid, call ses_req_pool_task()
  */
 
-static void
+static void __match_proto__(task_func_t)
 ses_sess_pool_task(struct worker *wrk, void *arg)
 {
 	struct req *req;
@@ -138,7 +138,9 @@ ses_sess_pool_task(struct worker *wrk, void *arg)
 	CHECK_OBJ_NOTNULL(req, REQ_MAGIC);
 
 	sp->sess_step = S_STP_NEWREQ;
-	ses_req_pool_task(wrk, req);
+
+	wrk->task.func = ses_req_pool_task;
+	wrk->task.priv = req;
 }
 
 /*--------------------------------------------------------------------
@@ -188,7 +190,7 @@ ses_vsl_socket(struct sess *sp, const char *lsockname)
  * Called from assigned worker thread
  */
 
-void
+void __match_proto__(task_func_t)
 SES_pool_accept_task(struct worker *wrk, void *arg)
 {
 	struct sesspool *pp;
@@ -214,7 +216,8 @@ SES_pool_accept_task(struct worker *wrk, void *arg)
 	lsockname = VCA_SetupSess(wrk, sp);
 	ses_vsl_socket(sp, lsockname);
 
-	ses_sess_pool_task(wrk, sp);
+	wrk->task.func = ses_sess_pool_task;
+	wrk->task.priv = sp;
 }
 
 /*--------------------------------------------------------------------


