[master] e4fa4c7 Rename the child's copy of params to cache_param so it is clear that it belongs there.
Poul-Henning Kamp
phk at varnish-cache.org
Thu Nov 10 11:08:41 CET 2011
commit e4fa4c7ab40a7514395645cbe6e1227d580d4882
Author: Poul-Henning Kamp <phk at FreeBSD.org>
Date: Thu Nov 10 09:05:18 2011 +0000
Rename the child's copy of params to cache_param so it is clear that
it belongs there.
diff --git a/bin/varnishd/cache.h b/bin/varnishd/cache.h
index 31db957..5941fcc 100644
--- a/bin/varnishd/cache.h
+++ b/bin/varnishd/cache.h
@@ -873,7 +873,7 @@ void WSL_Flush(struct worker *w, int overflow);
#define DSL(flag, tag, id, ...) \
do { \
- if (params->diag_bitmap & (flag)) \
+ if (cache_param->diag_bitmap & (flag)) \
VSL((tag), (id), __VA_ARGS__); \
} while (0)
diff --git a/bin/varnishd/cache_acceptor.c b/bin/varnishd/cache_acceptor.c
index 03a1a34..550a53f 100644
--- a/bin/varnishd/cache_acceptor.c
+++ b/bin/varnishd/cache_acceptor.c
@@ -121,7 +121,7 @@ VCA_Prep(struct sess *sp)
addr, sizeof addr, port, sizeof port);
sp->addr = WS_Dup(sp->ws, addr);
sp->port = WS_Dup(sp->ws, port);
- if (params->log_local_addr) {
+ if (cache_param->log_local_addr) {
AZ(getsockname(sp->fd, (void*)&sp->mysockaddr, &sp->mysockaddrlen));
VTCP_name(&sp->mysockaddr, sp->mysockaddrlen,
addr, sizeof addr, port, sizeof port);
@@ -176,9 +176,9 @@ vca_pace_bad(void)
{
Lck_Lock(&pace_mtx);
- vca_pace += params->acceptor_sleep_incr;
- if (vca_pace > params->acceptor_sleep_max)
- vca_pace = params->acceptor_sleep_max;
+ vca_pace += cache_param->acceptor_sleep_incr;
+ if (vca_pace > cache_param->acceptor_sleep_max)
+ vca_pace = cache_param->acceptor_sleep_max;
Lck_Unlock(&pace_mtx);
}
@@ -189,8 +189,8 @@ vca_pace_good(void)
if (vca_pace == 0.0)
return;
Lck_Lock(&pace_mtx);
- vca_pace *= params->acceptor_sleep_decay;
- if (vca_pace < params->acceptor_sleep_incr)
+ vca_pace *= cache_param->acceptor_sleep_decay;
+ if (vca_pace < cache_param->acceptor_sleep_incr)
vca_pace = 0.0;
Lck_Unlock(&pace_mtx);
}
@@ -306,7 +306,7 @@ vca_acct(void *arg)
VTAILQ_FOREACH(ls, &heritage.socks, list) {
if (ls->sock < 0)
continue;
- AZ(listen(ls->sock, params->listen_depth));
+ AZ(listen(ls->sock, cache_param->listen_depth));
AZ(setsockopt(ls->sock, SOL_SOCKET, SO_LINGER,
&linger, sizeof linger));
}
@@ -318,9 +318,9 @@ vca_acct(void *arg)
while (1) {
(void)sleep(1);
#ifdef SO_SNDTIMEO_WORKS
- if (params->idle_send_timeout != send_timeout) {
+ if (cache_param->idle_send_timeout != send_timeout) {
need_test = 1;
- send_timeout = params->idle_send_timeout;
+ send_timeout = cache_param->idle_send_timeout;
tv_sndtimeo = VTIM_timeval(send_timeout);
VTAILQ_FOREACH(ls, &heritage.socks, list) {
if (ls->sock < 0)
@@ -332,9 +332,9 @@ vca_acct(void *arg)
}
#endif
#ifdef SO_RCVTIMEO_WORKS
- if (params->sess_timeout != sess_timeout) {
+ if (cache_param->sess_timeout != sess_timeout) {
need_test = 1;
- sess_timeout = params->sess_timeout;
+ sess_timeout = cache_param->sess_timeout;
tv_rcvtimeo = VTIM_timeval(sess_timeout);
VTAILQ_FOREACH(ls, &heritage.socks, list) {
if (ls->sock < 0)
diff --git a/bin/varnishd/cache_backend.c b/bin/varnishd/cache_backend.c
index c94d819..e9d0323 100644
--- a/bin/varnishd/cache_backend.c
+++ b/bin/varnishd/cache_backend.c
@@ -96,7 +96,7 @@ VBE_ReleaseConn(struct vbc *vc)
if (dst == 0.0) \
dst = be->tmx; \
if (dst == 0.0) \
- dst = params->tmx; \
+ dst = cache_param->tmx; \
} while (0)
/*--------------------------------------------------------------------
@@ -158,7 +158,7 @@ bes_conn_try(const struct sess *sp, struct vbc *vc, const struct vdi_simple *vs)
/* release lock during stuff that can take a long time */
- if (params->prefer_ipv6 && bp->ipv6 != NULL) {
+ if (cache_param->prefer_ipv6 && bp->ipv6 != NULL) {
s = vbe_TryConnect(sp, PF_INET6, bp->ipv6, bp->ipv6len, vs);
vc->addr = bp->ipv6;
vc->addrlen = bp->ipv6len;
@@ -168,7 +168,7 @@ bes_conn_try(const struct sess *sp, struct vbc *vc, const struct vdi_simple *vs)
vc->addr = bp->ipv4;
vc->addrlen = bp->ipv4len;
}
- if (s == -1 && !params->prefer_ipv6 && bp->ipv6 != NULL) {
+ if (s == -1 && !cache_param->prefer_ipv6 && bp->ipv6 != NULL) {
s = vbe_TryConnect(sp, PF_INET6, bp->ipv6, bp->ipv6len, vs);
vc->addr = bp->ipv6;
vc->addrlen = bp->ipv6len;
@@ -232,7 +232,7 @@ vbe_NewConn(void)
* It evaluates if a backend is healthy _for_a_specific_object_.
* That means that it relies on sp->objcore->objhead. This is mainly for
* saint-mode, but also takes backend->healthy into account. If
- * params->saintmode_threshold is 0, this is basically just a test of
+ * cache_param->saintmode_threshold is 0, this is basically just a test of
* backend->healthy.
*
* The threshold has to be evaluated _after_ the timeout check, otherwise
@@ -266,7 +266,7 @@ vbe_Healthy(const struct vdi_simple *vs, const struct sess *sp)
* specified by VCL (thus use param).
*/
if (vs->vrt->saintmode_threshold == UINT_MAX)
- threshold = params->saintmode_threshold;
+ threshold = cache_param->saintmode_threshold;
else
threshold = vs->vrt->saintmode_threshold;
diff --git a/bin/varnishd/cache_backend_poll.c b/bin/varnishd/cache_backend_poll.c
index eb6cc45..efd64cb 100644
--- a/bin/varnishd/cache_backend_poll.c
+++ b/bin/varnishd/cache_backend_poll.c
@@ -138,7 +138,7 @@ vbp_poke(struct vbp_target *vt)
tmo = (int)round((t_end - t_now) * 1e3);
s = -1;
- if (params->prefer_ipv6 && bp->ipv6 != NULL) {
+ if (cache_param->prefer_ipv6 && bp->ipv6 != NULL) {
s = vbp_connect(PF_INET6, bp->ipv6, bp->ipv6len, tmo);
t_now = VTIM_real();
tmo = (int)round((t_end - t_now) * 1e3);
diff --git a/bin/varnishd/cache_ban.c b/bin/varnishd/cache_ban.c
index ac45144..768e631 100644
--- a/bin/varnishd/cache_ban.c
+++ b/bin/varnishd/cache_ban.c
@@ -407,7 +407,7 @@ BAN_Insert(struct ban *b)
VSC_C_main->bans_req++;
be = VTAILQ_LAST(&ban_head, banhead_s);
- if (params->ban_dups && be != b)
+ if (cache_param->ban_dups && be != b)
be->refcount++;
else
be = NULL;
@@ -825,13 +825,13 @@ ban_lurker_work(const struct sess *sp, unsigned pass)
b->flags &= ~BAN_F_LURK;
b->flags |= pass;
}
- if (params->diag_bitmap & 0x80000)
+ if (cache_param->diag_bitmap & 0x80000)
VSL(SLT_Debug, 0, "lurker: %d actionable bans", i);
if (i == 0)
return (0);
VTAILQ_FOREACH_REVERSE(b, &ban_head, banhead_s, list) {
- if (params->diag_bitmap & 0x80000)
+ if (cache_param->diag_bitmap & 0x80000)
VSL(SLT_Debug, 0, "lurker doing %f %d",
ban_time(b->spec), b->refcount);
while (1) {
@@ -840,7 +840,7 @@ ban_lurker_work(const struct sess *sp, unsigned pass)
if (oc == NULL)
break;
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
- if (params->diag_bitmap & 0x80000)
+ if (cache_param->diag_bitmap & 0x80000)
VSL(SLT_Debug, 0, "test: %p %d %d",
oc, oc->flags & OC_F_LURK, pass);
if ((oc->flags & OC_F_LURK) == pass)
@@ -849,7 +849,7 @@ ban_lurker_work(const struct sess *sp, unsigned pass)
CHECK_OBJ_NOTNULL(oh, OBJHEAD_MAGIC);
if (Lck_Trylock(&oh->mtx)) {
Lck_Unlock(&ban_mtx);
- VTIM_sleep(params->ban_lurker_sleep);
+ VTIM_sleep(cache_param->ban_lurker_sleep);
continue;
}
/*
@@ -863,7 +863,7 @@ ban_lurker_work(const struct sess *sp, unsigned pass)
if (oc2 == NULL) {
Lck_Unlock(&oh->mtx);
Lck_Unlock(&ban_mtx);
- VTIM_sleep(params->ban_lurker_sleep);
+ VTIM_sleep(cache_param->ban_lurker_sleep);
continue;
}
/*
@@ -879,7 +879,7 @@ ban_lurker_work(const struct sess *sp, unsigned pass)
*/
o = oc_getobj(sp->wrk, oc);
i = ban_check_object(o, sp, 0);
- if (params->diag_bitmap & 0x80000)
+ if (cache_param->diag_bitmap & 0x80000)
VSL(SLT_Debug, 0, "lurker got: %p %d",
oc, i);
if (i == -1) {
@@ -891,11 +891,11 @@ ban_lurker_work(const struct sess *sp, unsigned pass)
Lck_Unlock(&ban_mtx);
}
Lck_Unlock(&oh->mtx);
- if (params->diag_bitmap & 0x80000)
+ if (cache_param->diag_bitmap & 0x80000)
VSL(SLT_Debug, 0, "lurker done: %p %d %d",
oc, oc->flags & OC_F_LURK, pass);
(void)HSH_Deref(sp->wrk, NULL, &o);
- VTIM_sleep(params->ban_lurker_sleep);
+ VTIM_sleep(cache_param->ban_lurker_sleep);
}
Lck_AssertHeld(&ban_mtx);
if (!(b->flags & BAN_F_REQ)) {
@@ -903,12 +903,12 @@ ban_lurker_work(const struct sess *sp, unsigned pass)
b->flags |= BAN_F_GONE;
VSC_C_main->bans_gone++;
}
- if (params->diag_bitmap & 0x80000)
+ if (cache_param->diag_bitmap & 0x80000)
VSL(SLT_Debug, 0, "lurker BAN %f now gone",
ban_time(b->spec));
}
Lck_Unlock(&ban_mtx);
- VTIM_sleep(params->ban_lurker_sleep);
+ VTIM_sleep(cache_param->ban_lurker_sleep);
if (b == b0)
break;
}
@@ -925,7 +925,7 @@ ban_lurker(struct sess *sp, void *priv)
(void)priv;
while (1) {
- while (params->ban_lurker_sleep == 0.0) {
+ while (cache_param->ban_lurker_sleep == 0.0) {
/*
* Ban-lurker is disabled:
* Clean the last ban, if possible, and sleep
@@ -947,7 +947,7 @@ ban_lurker(struct sess *sp, void *priv)
pass &= BAN_F_LURK;
if (pass == 0)
pass += (1 << LURK_SHIFT);
- VTIM_sleep(params->ban_lurker_sleep);
+ VTIM_sleep(cache_param->ban_lurker_sleep);
} else {
VTIM_sleep(1.0);
}
@@ -1064,14 +1064,14 @@ ccf_ban_list(struct cli *cli, const char * const *av, void *priv)
VCLI_Out(cli, "Present bans:\n");
VTAILQ_FOREACH(b, &ban_head, list) {
- if (b == bl && !(params->diag_bitmap & 0x80000))
+ if (b == bl && !(cache_param->diag_bitmap & 0x80000))
break;
VCLI_Out(cli, "%10.6f %5u%s\t", ban_time(b->spec),
bl == b ? b->refcount - 1 : b->refcount,
b->flags & BAN_F_GONE ? "G" : " ");
ban_render(cli, b->spec);
VCLI_Out(cli, "\n");
- if (params->diag_bitmap & 0x80000) {
+ if (cache_param->diag_bitmap & 0x80000) {
Lck_Lock(&ban_mtx);
struct objcore *oc;
VTAILQ_FOREACH(oc, &b->objcore, ban_list)
diff --git a/bin/varnishd/cache_center.c b/bin/varnishd/cache_center.c
index 37c4b7b..e42fac8 100644
--- a/bin/varnishd/cache_center.c
+++ b/bin/varnishd/cache_center.c
@@ -94,11 +94,11 @@ cnt_wait(struct sess *sp)
assert(sp->xid == 0);
i = HTC_Complete(sp->htc);
- if (i == 0 && params->session_linger > 0) {
+ if (i == 0 && cache_param->session_linger > 0) {
pfd[0].fd = sp->fd;
pfd[0].events = POLLIN;
pfd[0].revents = 0;
- i = poll(pfd, 1, params->session_linger);
+ i = poll(pfd, 1, cache_param->session_linger);
if (i)
i = HTC_Rx(sp->htc);
}
@@ -183,7 +183,7 @@ cnt_prepresp(struct sess *sp)
sp->wrk->res_mode |= RES_ESI_CHILD;
}
- if (params->http_gzip_support && sp->obj->gziped &&
+ if (cache_param->http_gzip_support && sp->obj->gziped &&
!RFC2616_Req_Gzip(sp)) {
/*
* We don't know what it uncompresses to
@@ -212,7 +212,7 @@ cnt_prepresp(struct sess *sp)
sp->t_resp = VTIM_real();
if (sp->obj->objcore != NULL) {
- if ((sp->t_resp - sp->obj->last_lru) > params->lru_timeout &&
+ if ((sp->t_resp - sp->obj->last_lru) > cache_param->lru_timeout &&
EXP_Touch(sp->obj->objcore))
sp->obj->last_lru = sp->t_resp;
sp->obj->last_use = sp->t_resp; /* XXX: locking ? */
@@ -224,7 +224,7 @@ cnt_prepresp(struct sess *sp)
case VCL_RET_DELIVER:
break;
case VCL_RET_RESTART:
- if (sp->restarts >= params->max_restarts)
+ if (sp->restarts >= cache_param->max_restarts)
break;
if (sp->wrk->do_stream) {
VDI_CloseFd(sp->wrk);
@@ -373,7 +373,7 @@ cnt_done(struct sess *sp)
return (1);
}
- if (sp->wrk->stats.client_req >= params->wthread_stats_rate)
+ if (sp->wrk->stats.client_req >= cache_param->wthread_stats_rate)
WRK_SumStat(sp->wrk);
/* Reset the workspace to the session-watermark */
WS_Reset(sp->ws, sp->ws_ses);
@@ -390,7 +390,7 @@ cnt_done(struct sess *sp)
sp->step = STP_WAIT;
return (0);
}
- if (params->session_linger > 0) {
+ if (cache_param->session_linger > 0) {
sp->wrk->stats.sess_linger++;
sp->step = STP_WAIT;
return (0);
@@ -436,12 +436,12 @@ cnt_error(struct sess *sp)
if (sp->obj == NULL) {
HSH_Prealloc(sp);
EXP_Clr(&w->exp);
- sp->obj = STV_NewObject(sp, NULL, params->http_resp_size,
- &w->exp, (uint16_t)params->http_max_hdr);
+ sp->obj = STV_NewObject(sp, NULL, cache_param->http_resp_size,
+ &w->exp, (uint16_t)cache_param->http_max_hdr);
if (sp->obj == NULL)
sp->obj = STV_NewObject(sp, TRANSIENT_STORAGE,
- params->http_resp_size, &w->exp,
- (uint16_t)params->http_max_hdr);
+ cache_param->http_resp_size, &w->exp,
+ (uint16_t)cache_param->http_max_hdr);
if (sp->obj == NULL) {
sp->doclose = "Out of objects";
sp->director = NULL;
@@ -477,7 +477,7 @@ cnt_error(struct sess *sp)
VCL_error_method(sp);
if (sp->handling == VCL_RET_RESTART &&
- sp->restarts < params->max_restarts) {
+ sp->restarts < cache_param->max_restarts) {
HSH_Drop(sp);
sp->director = NULL;
sp->restarts++;
@@ -701,7 +701,7 @@ cnt_fetchbody(struct sess *sp)
AZ(sp->wrk->vfp);
/* We do nothing unless the param is set */
- if (!params->http_gzip_support)
+ if (!cache_param->http_gzip_support)
sp->wrk->do_gzip = sp->wrk->do_gunzip = 0;
sp->wrk->is_gzip =
@@ -768,7 +768,7 @@ cnt_fetchbody(struct sess *sp)
*/
l += strlen("Content-Length: XxxXxxXxxXxxXxxXxx") + sizeof(void *);
- if (sp->wrk->exp.ttl < params->shortlived || sp->objcore == NULL)
+ if (sp->wrk->exp.ttl < cache_param->shortlived || sp->objcore == NULL)
sp->wrk->storage_hint = TRANSIENT_STORAGE;
sp->obj = STV_NewObject(sp, sp->wrk->storage_hint, l,
@@ -780,8 +780,8 @@ cnt_fetchbody(struct sess *sp)
*/
sp->obj = STV_NewObject(sp, TRANSIENT_STORAGE, l,
&sp->wrk->exp, nhttp);
- if (sp->wrk->exp.ttl > params->shortlived)
- sp->wrk->exp.ttl = params->shortlived;
+ if (sp->wrk->exp.ttl > cache_param->shortlived)
+ sp->wrk->exp.ttl = cache_param->shortlived;
sp->wrk->exp.grace = 0.0;
sp->wrk->exp.keep = 0.0;
}
@@ -893,7 +893,7 @@ cnt_streambody(struct sess *sp)
int i;
struct stream_ctx sctx;
uint8_t obuf[sp->wrk->res_mode & RES_GUNZIP ?
- params->gzip_stack_buffer : 1];
+ cache_param->gzip_stack_buffer : 1];
memset(&sctx, 0, sizeof sctx);
sctx.magic = STREAM_CTX_MAGIC;
@@ -967,8 +967,8 @@ cnt_first(struct sess *sp)
sp->ws_ses = WS_Snapshot(sp->ws);
/* Receive a HTTP protocol request */
- HTC_Init(sp->htc, sp->ws, sp->fd, sp->vsl_id, params->http_req_size,
- params->http_req_hdr_len);
+ HTC_Init(sp->htc, sp->ws, sp->fd, sp->vsl_id, cache_param->http_req_size,
+ cache_param->http_req_hdr_len);
sp->wrk->lastused = sp->t_open;
sp->wrk->acct_tmp.sess++;
@@ -1186,7 +1186,7 @@ cnt_miss(struct sess *sp)
http_Setup(sp->wrk->bereq, sp->wrk->ws);
http_FilterHeader(sp, HTTPH_R_FETCH);
http_ForceGet(sp->wrk->bereq);
- if (params->http_gzip_support) {
+ if (cache_param->http_gzip_support) {
/*
* We always ask the backend for gzip, even if the
* client doesn't grok it. We will uncompress for
@@ -1377,7 +1377,7 @@ cnt_recv(struct sess *sp)
VCL_recv_method(sp);
recv_handling = sp->handling;
- if (sp->restarts >= params->max_restarts) {
+ if (sp->restarts >= cache_param->max_restarts) {
if (sp->err_code == 0)
sp->err_code = 503;
sp->step = STP_ERROR;
@@ -1392,7 +1392,7 @@ cnt_recv(struct sess *sp)
sp->wrk->do_gunzip = 0;
sp->wrk->do_stream = 0;
- if (params->http_gzip_support &&
+ if (cache_param->http_gzip_support &&
(recv_handling != VCL_RET_PIPE) &&
(recv_handling != VCL_RET_PASS)) {
if (RFC2616_Req_Gzip(sp)) {
@@ -1607,7 +1607,7 @@ CNT_Session(struct sess *sp)
switch (sp->step) {
#define STEP(l,u) \
case STP_##u: \
- if (params->diag_bitmap & 0x01) \
+ if (cache_param->diag_bitmap & 0x01) \
cnt_diag(sp, #u); \
done = cnt_##l(sp); \
break;
diff --git a/bin/varnishd/cache_cli.c b/bin/varnishd/cache_cli.c
index 30a12e3..ac2afc1 100644
--- a/bin/varnishd/cache_cli.c
+++ b/bin/varnishd/cache_cli.c
@@ -235,7 +235,7 @@ CLI_Init(void)
Lck_New(&cli_mtx, lck_cli);
cli_thread = pthread_self();
- cls = VCLS_New(cli_cb_before, cli_cb_after, params->cli_buffer);
+ cls = VCLS_New(cli_cb_before, cli_cb_after, cache_param->cli_buffer);
AN(cls);
CLI_AddFuncs(master_cmds);
diff --git a/bin/varnishd/cache_esi_deliver.c b/bin/varnishd/cache_esi_deliver.c
index 2c62bb7..4051027 100644
--- a/bin/varnishd/cache_esi_deliver.c
+++ b/bin/varnishd/cache_esi_deliver.c
@@ -52,7 +52,7 @@ ved_include(struct sess *sp, const char *src, const char *host)
w = sp->wrk;
- if (sp->esi_level >= params->max_esi_depth)
+ if (sp->esi_level >= cache_param->max_esi_depth)
return;
sp->esi_level++;
@@ -231,7 +231,7 @@ ESI_Deliver(struct sess *sp)
uint8_t tailbuf[8 + 5];
int isgzip;
struct vgz *vgz = NULL;
- char obuf[params->gzip_stack_buffer];
+ char obuf[cache_param->gzip_stack_buffer];
ssize_t obufl = 0;
size_t dl;
const void *dp;
diff --git a/bin/varnishd/cache_esi_fetch.c b/bin/varnishd/cache_esi_fetch.c
index f414e7b..5ec8f6b 100644
--- a/bin/varnishd/cache_esi_fetch.c
+++ b/bin/varnishd/cache_esi_fetch.c
@@ -51,7 +51,7 @@ vef_read(struct worker *w, struct http_conn *htc, void *buf, ssize_t buflen,
if (buflen < bytes)
bytes = buflen;
- if (params->esi_syntax & 0x8) {
+ if (cache_param->esi_syntax & 0x8) {
d = (random() & 3) + 1;
if (d < bytes)
bytes = d;
@@ -96,7 +96,7 @@ vfp_esi_bytes_gu(struct worker *w, struct http_conn *htc, ssize_t bytes)
{
struct vgz *vg;
ssize_t wl;
- uint8_t ibuf[params->gzip_stack_buffer];
+ uint8_t ibuf[cache_param->gzip_stack_buffer];
int i;
size_t dl;
const void *dp;
@@ -207,7 +207,7 @@ static int
vfp_esi_bytes_ug(struct worker *w, struct http_conn *htc, ssize_t bytes)
{
ssize_t wl;
- char ibuf[params->gzip_stack_buffer];
+ char ibuf[cache_param->gzip_stack_buffer];
struct vef_priv *vef;
CHECK_OBJ_NOTNULL(w, WORKER_MAGIC);
@@ -244,8 +244,8 @@ static int
vfp_esi_bytes_gg(struct worker *w, struct http_conn *htc, size_t bytes)
{
ssize_t wl;
- char ibuf[params->gzip_stack_buffer];
- char ibuf2[params->gzip_stack_buffer];
+ char ibuf[cache_param->gzip_stack_buffer];
+ char ibuf2[cache_param->gzip_stack_buffer];
struct vef_priv *vef;
size_t dl;
const void *dp;
diff --git a/bin/varnishd/cache_esi_parse.c b/bin/varnishd/cache_esi_parse.c
index f6eb367..9e2b4f6 100644
--- a/bin/varnishd/cache_esi_parse.c
+++ b/bin/varnishd/cache_esi_parse.c
@@ -269,7 +269,7 @@ static void
vep_emit_skip(const struct vep_state *vep, ssize_t l)
{
- if (params->esi_syntax & 0x20) {
+ if (cache_param->esi_syntax & 0x20) {
Debug("---> SKIP(%jd)\n", (intmax_t)l);
}
vep_emit_len(vep, l, VEC_S1, VEC_S2, VEC_S8);
@@ -280,7 +280,7 @@ vep_emit_verbatim(const struct vep_state *vep, ssize_t l, ssize_t l_crc)
{
uint8_t buf[4];
- if (params->esi_syntax & 0x20) {
+ if (cache_param->esi_syntax & 0x20) {
Debug("---> VERBATIM(%jd)\n", (intmax_t)l);
}
vep_emit_len(vep, l, VEC_V1, VEC_V2, VEC_V8);
@@ -585,7 +585,7 @@ VEP_Parse(const struct worker *w, const char *p, size_t l)
*/
if (vep->state == VEP_START) {
- if (params->esi_syntax & 0x1)
+ if (cache_param->esi_syntax & 0x1)
vep->state = VEP_NEXTTAG;
else
vep->state = VEP_TESTXML;
@@ -618,7 +618,7 @@ VEP_Parse(const struct worker *w, const char *p, size_t l)
*/
} else if (vep->state == VEP_NOTMYTAG) {
- if (params->esi_syntax & 0x2) {
+ if (cache_param->esi_syntax & 0x2) {
p++;
vep->state = VEP_NEXTTAG;
} else {
diff --git a/bin/varnishd/cache_expire.c b/bin/varnishd/cache_expire.c
index f7f779d..23e3fc6 100644
--- a/bin/varnishd/cache_expire.c
+++ b/bin/varnishd/cache_expire.c
@@ -113,7 +113,7 @@ EXP_Keep(const struct sess *sp, const struct object *o)
{
double r;
- r = (double)params->default_keep;
+ r = (double)cache_param->default_keep;
if (o->exp.keep > 0.)
r = o->exp.keep;
if (sp != NULL && sp->exp.keep > 0. && sp->exp.keep < r)
@@ -126,7 +126,7 @@ EXP_Grace(const struct sess *sp, const struct object *o)
{
double r;
- r = (double)params->default_grace;
+ r = (double)cache_param->default_grace;
if (o->exp.grace >= 0.)
r = o->exp.grace;
if (sp != NULL && sp->exp.grace > 0. && sp->exp.grace < r)
@@ -344,7 +344,7 @@ exp_timer(struct sess *sp, void *priv)
if (oc == NULL) {
WSL_Flush(sp->wrk, 0);
WRK_SumStat(sp->wrk);
- VTIM_sleep(params->expiry_sleep);
+ VTIM_sleep(cache_param->expiry_sleep);
t = VTIM_real();
}
diff --git a/bin/varnishd/cache_fetch.c b/bin/varnishd/cache_fetch.c
index 67bf970..a678dcc 100644
--- a/bin/varnishd/cache_fetch.c
+++ b/bin/varnishd/cache_fetch.c
@@ -188,7 +188,7 @@ FetchStorage(struct worker *w, ssize_t sz)
if (l == 0)
l = sz;
if (l == 0)
- l = params->fetch_chunksize * 1024LL;
+ l = cache_param->fetch_chunksize * 1024LL;
st = STV_alloc(w, l);
if (st == NULL) {
(void)FetchError(w, "Could not get storage");
@@ -437,8 +437,8 @@ FetchHdr(struct sess *sp)
/* Receive response */
- HTC_Init(w->htc, w->ws, vc->fd, vc->vsl_id, params->http_resp_size,
- params->http_resp_hdr_len);
+ HTC_Init(w->htc, w->ws, vc->fd, vc->vsl_id, cache_param->http_resp_size,
+ cache_param->http_resp_hdr_len);
VTCP_set_read_timeout(vc->fd, vc->first_byte_timeout);
diff --git a/bin/varnishd/cache_gzip.c b/bin/varnishd/cache_gzip.c
index 6da1595..32a7413 100644
--- a/bin/varnishd/cache_gzip.c
+++ b/bin/varnishd/cache_gzip.c
@@ -131,7 +131,7 @@ vgz_alloc_vgz(struct worker *wrk, const char *id)
vg->wrk = wrk;
vg->id = id;
- switch (params->gzip_tmp_space) {
+ switch (cache_param->gzip_tmp_space) {
case 0:
case 1:
/* malloc, the default */
@@ -196,10 +196,10 @@ VGZ_NewGzip(struct worker *wrk, const char *id)
* XXX: too many worker threads grow the stacks.
*/
i = deflateInit2(&vg->vz,
- params->gzip_level, /* Level */
+ cache_param->gzip_level, /* Level */
Z_DEFLATED, /* Method */
- 16 + params->gzip_window, /* Window bits (16=gzip + 15) */
- params->gzip_memlevel, /* memLevel */
+ 16 + cache_param->gzip_window, /* Window bits (16=gzip + 15) */
+ cache_param->gzip_memlevel, /* memLevel */
Z_DEFAULT_STRATEGY);
assert(Z_OK == i);
return (vg);
@@ -467,7 +467,7 @@ vfp_gunzip_bytes(struct worker *w, struct http_conn *htc, ssize_t bytes)
struct vgz *vg;
ssize_t l, wl;
int i = -100;
- uint8_t ibuf[params->gzip_stack_buffer];
+ uint8_t ibuf[cache_param->gzip_stack_buffer];
size_t dl;
const void *dp;
@@ -545,7 +545,7 @@ vfp_gzip_bytes(struct worker *w, struct http_conn *htc, ssize_t bytes)
struct vgz *vg;
ssize_t l, wl;
int i = -100;
- uint8_t ibuf[params->gzip_stack_buffer];
+ uint8_t ibuf[cache_param->gzip_stack_buffer];
size_t dl;
const void *dp;
@@ -632,7 +632,7 @@ vfp_testgzip_bytes(struct worker *w, struct http_conn *htc, ssize_t bytes)
struct vgz *vg;
ssize_t l, wl;
int i = -100;
- uint8_t obuf[params->gzip_stack_buffer];
+ uint8_t obuf[cache_param->gzip_stack_buffer];
size_t dl;
const void *dp;
struct storage *st;
diff --git a/bin/varnishd/cache_hash.c b/bin/varnishd/cache_hash.c
index ecce40d..669fd40 100644
--- a/bin/varnishd/cache_hash.c
+++ b/bin/varnishd/cache_hash.c
@@ -169,7 +169,7 @@ HSH_AddString(const struct sess *sp, const char *str)
SHA256_Update(sp->wrk->sha256ctx, str, l);
SHA256_Update(sp->wrk->sha256ctx, "#", 1);
- if (params->log_hash)
+ if (cache_param->log_hash)
WSP(sp, SLT_Hash, "%s", str);
}
@@ -267,7 +267,7 @@ HSH_Insert(const struct sess *sp)
w = sp->wrk;
HSH_Prealloc(sp);
- if (params->diag_bitmap & 0x80000000)
+ if (cache_param->diag_bitmap & 0x80000000)
hsh_testmagic(sp->wrk->nobjhead->digest);
AZ(sp->hash_objhead);
@@ -316,7 +316,7 @@ HSH_Lookup(struct sess *sp, struct objhead **poh)
HSH_Prealloc(sp);
memcpy(sp->wrk->nobjhead->digest, sp->digest, sizeof sp->digest);
- if (params->diag_bitmap & 0x80000000)
+ if (cache_param->diag_bitmap & 0x80000000)
hsh_testmagic(sp->wrk->nobjhead->digest);
if (sp->hash_objhead != NULL) {
@@ -436,7 +436,7 @@ HSH_Lookup(struct sess *sp, struct objhead **poh)
}
VTAILQ_INSERT_TAIL(&oh->waitinglist->list, sp, list);
}
- if (params->diag_bitmap & 0x20)
+ if (cache_param->diag_bitmap & 0x20)
WSP(sp, SLT_Debug,
"on waiting list <%p>", oh);
SES_Charge(sp);
@@ -492,7 +492,7 @@ hsh_rush(struct objhead *oh)
Lck_AssertHeld(&oh->mtx);
wl = oh->waitinglist;
CHECK_OBJ_NOTNULL(wl, WAITINGLIST_MAGIC);
- for (u = 0; u < params->rush_exponent; u++) {
+ for (u = 0; u < cache_param->rush_exponent; u++) {
sp = VTAILQ_FIRST(&wl->list);
if (sp == NULL)
break;
@@ -616,7 +616,7 @@ HSH_Unbusy(const struct sess *sp)
assert(oh->refcnt > 0);
if (o->ws_o->overflow)
sp->wrk->stats.n_objoverflow++;
- if (params->diag_bitmap & 0x40)
+ if (cache_param->diag_bitmap & 0x40)
WSP(sp, SLT_Debug,
"Object %u workspace free %u", o->xid, WS_Free(o->ws_o));
diff --git a/bin/varnishd/cache_lck.c b/bin/varnishd/cache_lck.c
index ee93b9f..2aef6dc 100644
--- a/bin/varnishd/cache_lck.c
+++ b/bin/varnishd/cache_lck.c
@@ -64,7 +64,7 @@ Lck__Lock(struct lock *lck, const char *p, const char *f, int l)
int r;
CAST_OBJ_NOTNULL(ilck, lck->priv, ILCK_MAGIC);
- if (!(params->diag_bitmap & 0x18)) {
+ if (!(cache_param->diag_bitmap & 0x18)) {
AZ(pthread_mutex_lock(&ilck->mtx));
AZ(ilck->held);
ilck->stat->locks++;
@@ -76,11 +76,11 @@ Lck__Lock(struct lock *lck, const char *p, const char *f, int l)
assert(r == 0 || r == EBUSY);
if (r) {
ilck->stat->colls++;
- if (params->diag_bitmap & 0x8)
+ if (cache_param->diag_bitmap & 0x8)
VSL(SLT_Debug, 0, "MTX_CONTEST(%s,%s,%d,%s)",
p, f, l, ilck->w);
AZ(pthread_mutex_lock(&ilck->mtx));
- } else if (params->diag_bitmap & 0x8) {
+ } else if (cache_param->diag_bitmap & 0x8) {
VSL(SLT_Debug, 0, "MTX_LOCK(%s,%s,%d,%s)", p, f, l, ilck->w);
}
AZ(ilck->held);
@@ -99,7 +99,7 @@ Lck__Unlock(struct lock *lck, const char *p, const char *f, int l)
AN(ilck->held);
ilck->held = 0;
AZ(pthread_mutex_unlock(&ilck->mtx));
- if (params->diag_bitmap & 0x8)
+ if (cache_param->diag_bitmap & 0x8)
VSL(SLT_Debug, 0, "MTX_UNLOCK(%s,%s,%d,%s)", p, f, l, ilck->w);
}
@@ -112,7 +112,7 @@ Lck__Trylock(struct lock *lck, const char *p, const char *f, int l)
CAST_OBJ_NOTNULL(ilck, lck->priv, ILCK_MAGIC);
r = pthread_mutex_trylock(&ilck->mtx);
assert(r == 0 || r == EBUSY);
- if (params->diag_bitmap & 0x8)
+ if (cache_param->diag_bitmap & 0x8)
VSL(SLT_Debug, 0,
"MTX_TRYLOCK(%s,%s,%d,%s) = %d", p, f, l, ilck->w);
if (r == 0) {
diff --git a/bin/varnishd/cache_main.c b/bin/varnishd/cache_main.c
index c610bc3..5dc2d00 100644
--- a/bin/varnishd/cache_main.c
+++ b/bin/varnishd/cache_main.c
@@ -37,7 +37,7 @@
#include "waiter/cache_waiter.h"
#include "hash/hash_slinger.h"
-volatile struct params *params;
+volatile struct params *cache_param;
/*--------------------------------------------------------------------
* Per thread storage for the session currently being processed by
@@ -134,7 +134,7 @@ child_main(void)
BAN_Compile();
/* Wait for persistent storage to load if asked to */
- if (params->diag_bitmap & 0x00020000)
+ if (cache_param->diag_bitmap & 0x00020000)
SMP_Ready();
CLI_Run();
diff --git a/bin/varnishd/cache_panic.c b/bin/varnishd/cache_panic.c
index 4076d26..f69e90b 100644
--- a/bin/varnishd/cache_panic.c
+++ b/bin/varnishd/cache_panic.c
@@ -346,7 +346,7 @@ pan_ic(const char *func, const char *file, int line, const char *cond,
pan_backtrace();
- if (!(params->diag_bitmap & 0x2000)) {
+ if (!(cache_param->diag_bitmap & 0x2000)) {
sp = THR_GetSession();
if (sp != NULL)
pan_sess(sp);
@@ -354,11 +354,11 @@ pan_ic(const char *func, const char *file, int line, const char *cond,
VSB_printf(vsp, "\n");
VSB_bcat(vsp, "", 1); /* NUL termination */
- if (params->diag_bitmap & 0x4000)
+ if (cache_param->diag_bitmap & 0x4000)
(void)fputs(VSM_head->panicstr, stderr);
#ifdef HAVE_ABORT2
- if (params->diag_bitmap & 0x8000) {
+ if (cache_param->diag_bitmap & 0x8000) {
void *arg[1];
char *p;
@@ -369,7 +369,7 @@ pan_ic(const char *func, const char *file, int line, const char *cond,
abort2(VSM_head->panicstr, 1, arg);
}
#endif
- if (params->diag_bitmap & 0x1000)
+ if (cache_param->diag_bitmap & 0x1000)
exit(4);
else
abort();
diff --git a/bin/varnishd/cache_pipe.c b/bin/varnishd/cache_pipe.c
index be17ac9..4180d39 100644
--- a/bin/varnishd/cache_pipe.c
+++ b/bin/varnishd/cache_pipe.c
@@ -108,7 +108,7 @@ PipeSession(struct sess *sp)
while (fds[0].fd > -1 || fds[1].fd > -1) {
fds[0].revents = 0;
fds[1].revents = 0;
- i = poll(fds, 2, params->pipe_timeout * 1000);
+ i = poll(fds, 2, cache_param->pipe_timeout * 1000);
if (i < 1)
break;
if (fds[0].revents && rdf(vc->fd, sp->fd)) {
diff --git a/bin/varnishd/cache_pool.c b/bin/varnishd/cache_pool.c
index aed596e..62d58aa 100644
--- a/bin/varnishd/cache_pool.c
+++ b/bin/varnishd/cache_pool.c
@@ -272,7 +272,7 @@ Pool_Work_Thread(void *priv, struct worker *w)
AZ(w->wrw.wfd);
AZ(w->storage_hint);
assert(w->wlp == w->wlb);
- if (params->diag_bitmap & 0x00040000) {
+ if (cache_param->diag_bitmap & 0x00040000) {
if (w->vcl != NULL)
VCL_Rel(&w->vcl);
}
@@ -308,7 +308,7 @@ pool_queue(struct pool *pp, struct sess *sp)
}
/* If we have too much in the queue already, refuse. */
- if (pp->lqueue > (params->queue_max * pp->nthr) / 100) {
+ if (pp->lqueue > (cache_param->queue_max * pp->nthr) / 100) {
pp->ndropped++;
Lck_Unlock(&pp->mtx);
return (-1);
@@ -385,10 +385,10 @@ pool_breed(struct pool *qp, const pthread_attr_t *tp_attr)
* If we need more threads, and have space, create
* one more thread.
*/
- if (qp->nthr < params->wthread_min || /* Not enough threads yet */
- (qp->lqueue > params->wthread_add_threshold && /* more needed */
+ if (qp->nthr < cache_param->wthread_min || /* Not enough threads yet */
+ (qp->lqueue > cache_param->wthread_add_threshold && /* more needed */
qp->lqueue > qp->last_lqueue)) { /* not getting better since last */
- if (qp->nthr > params->wthread_max) {
+ if (qp->nthr > cache_param->wthread_max) {
Lck_Lock(&pool_mtx);
VSC_C_main->threads_limited++;
Lck_Unlock(&pool_mtx);
@@ -398,10 +398,10 @@ pool_breed(struct pool *qp, const pthread_attr_t *tp_attr)
Lck_Lock(&pool_mtx);
VSC_C_main->threads_limited++;
Lck_Unlock(&pool_mtx);
- VTIM_sleep(params->wthread_fail_delay * 1e-3);
+ VTIM_sleep(cache_param->wthread_fail_delay * 1e-3);
} else {
AZ(pthread_detach(tp));
- VTIM_sleep(params->wthread_add_delay * 1e-3);
+ VTIM_sleep(cache_param->wthread_add_delay * 1e-3);
qp->nthr++;
Lck_Lock(&pool_mtx);
VSC_C_main->threads++;
@@ -442,9 +442,9 @@ pool_herder(void *priv)
while (1) {
/* Set the stacksize for worker threads we create */
- if (params->wthread_stacksize != UINT_MAX)
+ if (cache_param->wthread_stacksize != UINT_MAX)
AZ(pthread_attr_setstacksize(&tp_attr,
- params->wthread_stacksize));
+ cache_param->wthread_stacksize));
else {
AZ(pthread_attr_destroy(&tp_attr));
AZ(pthread_attr_init(&tp_attr));
@@ -452,13 +452,13 @@ pool_herder(void *priv)
pool_breed(pp, &tp_attr);
- if (pp->nthr < params->wthread_min)
+ if (pp->nthr < cache_param->wthread_min)
continue;
AZ(clock_gettime(CLOCK_MONOTONIC, &ts));
- ts.tv_sec += params->wthread_purge_delay / 1000;
+ ts.tv_sec += cache_param->wthread_purge_delay / 1000;
ts.tv_nsec +=
- (params->wthread_purge_delay % 1000) * 1000000;
+ (cache_param->wthread_purge_delay % 1000) * 1000000;
if (ts.tv_nsec >= 1000000000) {
ts.tv_sec++;
ts.tv_nsec -= 1000000000;
@@ -470,10 +470,10 @@ pool_herder(void *priv)
if (!i)
continue;
- if (pp->nthr <= params->wthread_min)
+ if (pp->nthr <= cache_param->wthread_min)
continue;
- t_idle = VTIM_real() - params->wthread_timeout;
+ t_idle = VTIM_real() - cache_param->wthread_timeout;
Lck_Lock(&pp->mtx);
VSC_C_main->sess_queued += pp->nqueued;
@@ -481,7 +481,7 @@ pool_herder(void *priv)
pp->nqueued = pp->ndropped = 0;
w = VTAILQ_LAST(&pp->idle, workerhead);
if (w != NULL &&
- (w->lastused < t_idle || pp->nthr > params->wthread_max)) {
+ (w->lastused < t_idle || pp->nthr > cache_param->wthread_max)) {
VTAILQ_REMOVE(&pp->idle, w, list);
} else
w = NULL;
@@ -560,7 +560,7 @@ pool_poolherder(void *priv)
nwq = 0;
while (1) {
- if (nwq < params->wthread_pools) {
+ if (nwq < cache_param->wthread_pools) {
pp = pool_mkpool();
if (pp != NULL) {
VTAILQ_INSERT_TAIL(&pools, pp, list);
diff --git a/bin/varnishd/cache_response.c b/bin/varnishd/cache_response.c
index 87b0919..487a514 100644
--- a/bin/varnishd/cache_response.c
+++ b/bin/varnishd/cache_response.c
@@ -118,7 +118,7 @@ RES_BuildHttp(const struct sess *sp)
if (!(sp->wrk->res_mode & RES_LEN)) {
http_Unset(sp->wrk->resp, H_Content_Length);
- } else if (params->http_range_support) {
+ } else if (cache_param->http_range_support) {
/* We only accept ranges if we know the length */
http_SetHeader(sp->wrk, sp->vsl_id, sp->wrk->resp,
"Accept-Ranges: bytes");
@@ -156,7 +156,7 @@ res_WriteGunzipObj(const struct sess *sp)
struct storage *st;
unsigned u = 0;
struct vgz *vg;
- char obuf[params->gzip_stack_buffer];
+ char obuf[cache_param->gzip_stack_buffer];
ssize_t obufl = 0;
int i;
@@ -230,7 +230,7 @@ res_WriteDirObj(const struct sess *sp, ssize_t low, ssize_t high)
* XXX: Should use getpagesize() ?
*/
if (st->fd >= 0 &&
- st->len >= params->sendfile_threshold) {
+ st->len >= cache_param->sendfile_threshold) {
VSC_C_main->n_objsendfile++;
WRW_Sendfile(sp->wrk, st->fd, st->where + off, len);
continue;
@@ -275,7 +275,7 @@ RES_WriteObj(struct sess *sp)
sp->wantbody &&
(sp->wrk->res_mode & RES_LEN) &&
!(sp->wrk->res_mode & (RES_ESI|RES_ESI_CHILD|RES_GUNZIP)) &&
- params->http_range_support &&
+ cache_param->http_range_support &&
sp->obj->response == 200 &&
http_GetHdr(sp->http, H_Range, &r))
res_dorange(sp, r, &low, &high);
diff --git a/bin/varnishd/cache_session.c b/bin/varnishd/cache_session.c
index 5db418c..7befbcc 100644
--- a/bin/varnishd/cache_session.c
+++ b/bin/varnishd/cache_session.c
@@ -107,8 +107,8 @@ ses_sm_alloc(void)
* cache them locally, to make sure we get a consistent
* view of the value.
*/
- nws = params->sess_workspace;
- nhttp = (uint16_t)params->http_max_hdr;
+ nws = cache_param->sess_workspace;
+ nhttp = (uint16_t)cache_param->http_max_hdr;
hl = HTTP_estimate(nhttp);
l = sizeof *sm + nws + 2 * hl;
@@ -192,7 +192,7 @@ SES_New(struct worker *wrk, struct sesspool *pp)
sm = VTAILQ_FIRST(&pp->freelist);
if (sm != NULL) {
VTAILQ_REMOVE(&pp->freelist, sm, list);
- } else if (pp->nsess < params->max_sess) {
+ } else if (pp->nsess < cache_param->max_sess) {
pp->nsess++;
do_alloc = 1;
}
@@ -355,9 +355,9 @@ SES_Delete(struct sess *sp, const char *reason)
b->sess, b->req, b->pipe, b->pass,
b->fetch, b->hdrbytes, b->bodybytes);
- if (sm->workspace != params->sess_workspace ||
- sm->nhttp != (uint16_t)params->http_max_hdr ||
- pp->nsess > params->max_sess) {
+ if (sm->workspace != cache_param->sess_workspace ||
+ sm->nhttp != (uint16_t)cache_param->http_max_hdr ||
+ pp->nsess > cache_param->max_sess) {
free(sm);
Lck_Lock(&pp->mtx);
if (wrk != NULL)
diff --git a/bin/varnishd/cache_shmlog.c b/bin/varnishd/cache_shmlog.c
index 5601463..1252fa3 100644
--- a/bin/varnishd/cache_shmlog.c
+++ b/bin/varnishd/cache_shmlog.c
@@ -135,7 +135,7 @@ VSLR(enum VSL_tag_e tag, int id, const char *b, unsigned len)
uint32_t *p;
unsigned mlen;
- mlen = params->shm_reclen;
+ mlen = cache_param->shm_reclen;
/* Truncate */
if (len > mlen)
@@ -153,7 +153,7 @@ void
VSL(enum VSL_tag_e tag, int id, const char *fmt, ...)
{
va_list ap;
- unsigned n, mlen = params->shm_reclen;
+ unsigned n, mlen = cache_param->shm_reclen;
char buf[mlen];
/*
@@ -205,7 +205,7 @@ WSLR(struct worker *w, enum VSL_tag_e tag, int id, txt t)
unsigned l, mlen;
Tcheck(t);
- mlen = params->shm_reclen;
+ mlen = cache_param->shm_reclen;
/* Truncate */
l = Tlen(t);
@@ -225,7 +225,7 @@ WSLR(struct worker *w, enum VSL_tag_e tag, int id, txt t)
w->wlp = VSL_END(w->wlp, l);
assert(w->wlp < w->wle);
w->wlr++;
- if (params->diag_bitmap & 0x10000)
+ if (cache_param->diag_bitmap & 0x10000)
WSL_Flush(w, 0);
}
@@ -239,7 +239,7 @@ wsl(struct worker *w, enum VSL_tag_e tag, int id, const char *fmt, va_list ap)
txt t;
AN(fmt);
- mlen = params->shm_reclen;
+ mlen = cache_param->shm_reclen;
if (strchr(fmt, '%') == NULL) {
t.b = TRUST_ME(fmt);
@@ -261,7 +261,7 @@ wsl(struct worker *w, enum VSL_tag_e tag, int id, const char *fmt, va_list ap)
assert(w->wlp < w->wle);
w->wlr++;
}
- if (params->diag_bitmap & 0x10000)
+ if (cache_param->diag_bitmap & 0x10000)
WSL_Flush(w, 0);
}
diff --git a/bin/varnishd/cache_vary.c b/bin/varnishd/cache_vary.c
index c528fb8..026f937 100644
--- a/bin/varnishd/cache_vary.c
+++ b/bin/varnishd/cache_vary.c
@@ -155,7 +155,7 @@ vry_cmp(const uint8_t * const *v1, uint8_t * const *v2)
} else if (memcmp((*v1) + 2, (*v2) + 2, (*v1)[2] + 2)) {
/* Different header */
retval = 1;
- } else if (params->http_gzip_support &&
+ } else if (cache_param->http_gzip_support &&
!strcasecmp(H_Accept_Encoding, (const char*)((*v1)+2))) {
/*
* If we do gzip processing, we do not vary on Accept-Encoding,
diff --git a/bin/varnishd/cache_vrt.c b/bin/varnishd/cache_vrt.c
index 16258ec..3851de7 100644
--- a/bin/varnishd/cache_vrt.c
+++ b/bin/varnishd/cache_vrt.c
@@ -73,7 +73,7 @@ VRT_count(const struct sess *sp, unsigned u)
if (sp == NULL)
return;
CHECK_OBJ_NOTNULL(sp, SESS_MAGIC);
- if (params->vcl_trace)
+ if (cache_param->vcl_trace)
WSP(sp, SLT_VCL_trace, "%u %d.%d", u,
sp->vcl->ref[u].line, sp->vcl->ref[u].pos);
}
diff --git a/bin/varnishd/cache_vrt_re.c b/bin/varnishd/cache_vrt_re.c
index d6dc5d5..7759d0a 100644
--- a/bin/varnishd/cache_vrt_re.c
+++ b/bin/varnishd/cache_vrt_re.c
@@ -71,7 +71,7 @@ VRT_re_match(const struct sess *sp, const char *s, void *re)
s = "";
AN(re);
t = re;
- i = VRE_exec(t, s, strlen(s), 0, 0, NULL, 0, &params->vre_limits);
+ i = VRE_exec(t, s, strlen(s), 0, 0, NULL, 0, &cache_param->vre_limits);
if (i >= 0)
return (1);
if (i < VRE_ERROR_NOMATCH )
@@ -100,7 +100,7 @@ VRT_regsub(const struct sess *sp, int all, const char *str, void *re,
memset(ovector, 0, sizeof(ovector));
len = strlen(str);
i = VRE_exec(t, str, len, 0, options, ovector, 30,
- &params->vre_limits);
+ &cache_param->vre_limits);
/* If it didn't match, we can return the original string */
if (i == VRE_ERROR_NOMATCH)
@@ -141,7 +141,7 @@ VRT_regsub(const struct sess *sp, int all, const char *str, void *re,
memset(&ovector, 0, sizeof(ovector));
options |= VRE_NOTEMPTY_ATSTART;
i = VRE_exec(t, str, len, 0, options, ovector, 30,
- &params->vre_limits);
+ &cache_param->vre_limits);
if (i < VRE_ERROR_NOMATCH ) {
WS_Release(sp->http->ws, 0);
WSP(sp, SLT_VCL_Error,
diff --git a/bin/varnishd/cache_wrk.c b/bin/varnishd/cache_wrk.c
index e416eb9..bfee84c 100644
--- a/bin/varnishd/cache_wrk.c
+++ b/bin/varnishd/cache_wrk.c
@@ -185,15 +185,15 @@ WRK_thread(void *priv)
uint16_t nhttp;
unsigned siov;
- assert(params->http_max_hdr <= 65535);
+ assert(cache_param->http_max_hdr <= 65535);
/* We need to snapshot these two for consistency */
- nhttp = (uint16_t)params->http_max_hdr;
+ nhttp = (uint16_t)cache_param->http_max_hdr;
siov = nhttp * 2;
if (siov > IOV_MAX)
siov = IOV_MAX;
return (wrk_thread_real(priv,
- params->shm_workspace,
- params->wthread_workspace,
+ cache_param->shm_workspace,
+ cache_param->wthread_workspace,
nhttp, HTTP_estimate(nhttp), siov));
}
diff --git a/bin/varnishd/cache_wrw.c b/bin/varnishd/cache_wrw.c
index 3e2de4a..2160f69 100644
--- a/bin/varnishd/cache_wrw.c
+++ b/bin/varnishd/cache_wrw.c
@@ -134,7 +134,7 @@ WRW_Flush(struct worker *w)
*/
size_t used = 0;
- if (VTIM_real() - w->sp->t_resp > params->send_timeout) {
+ if (VTIM_real() - w->sp->t_resp > cache_param->send_timeout) {
WSL(w, SLT_Debug, *wrw->wfd,
"Hit total send timeout, wrote = %ld/%ld; not retrying",
i, wrw->liov);
@@ -309,7 +309,7 @@ WRW_Sendfile(struct worker *w, int fd, off_t off, unsigned len)
} while (0);
#elif defined(__sun) && defined(HAVE_SENDFILEV)
do {
- sendfilevec_t svvec[params->http_headers * 2 + 1];
+ sendfilevec_t svvec[cache_param->http_headers * 2 + 1];
size_t xferred = 0, expected = 0;
int i;
for (i = 0; i < wrw->niov; i++) {
diff --git a/bin/varnishd/hash/hash_critbit.c b/bin/varnishd/hash/hash_critbit.c
index 56cc5c0..b1c9cf3 100644
--- a/bin/varnishd/hash/hash_critbit.c
+++ b/bin/varnishd/hash/hash_critbit.c
@@ -369,7 +369,7 @@ hcb_cleaner(void *priv)
VTAILQ_CONCAT(&dead_h, &cool_h, hoh_list);
Lck_Unlock(&hcb_mtx);
WRK_SumStat(&ww);
- VTIM_sleep(params->critbit_cooloff);
+ VTIM_sleep(cache_param->critbit_cooloff);
}
NEEDLESS_RETURN(NULL);
}
diff --git a/bin/varnishd/heritage.h b/bin/varnishd/heritage.h
index 3732f18..39fc03e 100644
--- a/bin/varnishd/heritage.h
+++ b/bin/varnishd/heritage.h
@@ -222,7 +222,7 @@ struct params {
* We declare this a volatile pointer, so that reads of parameters
* become atomic, leaving the CLI thread lattitude to change the values
*/
-extern volatile struct params * params;
+extern volatile struct params * cache_param;
extern struct heritage heritage;
void child_main(void);
diff --git a/bin/varnishd/mgt/mgt_child.c b/bin/varnishd/mgt/mgt_child.c
index 9c6d37d..9503e5b 100644
--- a/bin/varnishd/mgt/mgt_child.c
+++ b/bin/varnishd/mgt/mgt_child.c
@@ -315,7 +315,7 @@ start_child(struct cli *cli)
heritage.std_fd = cp[1];
child_output = cp[0];
- AN(params);
+ AN(cache_param);
if ((pid = fork()) < 0) {
perror("Could not fork child");
exit(1);
diff --git a/bin/varnishd/mgt/mgt_param.c b/bin/varnishd/mgt/mgt_param.c
index 20456d7..dc6b708 100644
--- a/bin/varnishd/mgt/mgt_param.c
+++ b/bin/varnishd/mgt/mgt_param.c
@@ -1074,8 +1074,8 @@ MCF_ParamSet(struct cli *cli, const char *param, const char *val)
}
pp->func(cli, pp, val);
- if (cli->result == CLIS_OK && params != NULL)
- *params = mgt_param;
+ if (cli->result == CLIS_OK && cache_param != NULL)
+ *cache_param = mgt_param;
if (cli->result != CLIS_OK) {
VCLI_Out(cli, "(attempting to set param %s to %s)\n",
diff --git a/bin/varnishd/mgt/mgt_shmem.c b/bin/varnishd/mgt/mgt_shmem.c
index 37083df..3953bdb 100644
--- a/bin/varnishd/mgt/mgt_shmem.c
+++ b/bin/varnishd/mgt/mgt_shmem.c
@@ -321,9 +321,9 @@ mgt_SHM_Init(const char *l_arg)
AN(VSC_C_main);
/* XXX: We need to zero params if we dealloc/clean/wash */
- params = VSM_Alloc(sizeof *params, VSM_CLASS_PARAM, "", "");
- AN(params);
- *params = mgt_param;
+ cache_param = VSM_Alloc(sizeof *cache_param, VSM_CLASS_PARAM, "", "");
+ AN(cache_param);
+ *cache_param = mgt_param;
vsl_log_start = VSM_Alloc(s1, VSL_CLASS, "", "");
AN(vsl_log_start);
diff --git a/bin/varnishd/rfc2616.c b/bin/varnishd/rfc2616.c
index d4d5a05..4041f45 100644
--- a/bin/varnishd/rfc2616.c
+++ b/bin/varnishd/rfc2616.c
@@ -74,7 +74,7 @@ RFC2616_Ttl(const struct sess *sp)
assert(sp->wrk->exp.entered != 0.0 && !isnan(sp->wrk->exp.entered));
/* If all else fails, cache using default ttl */
- sp->wrk->exp.ttl = params->default_ttl;
+ sp->wrk->exp.ttl = cache_param->default_ttl;
max_age = age = 0;
h_expires = 0;
@@ -140,7 +140,7 @@ RFC2616_Ttl(const struct sess *sp)
}
if (h_date == 0 ||
- fabs(h_date - sp->wrk->exp.entered) < params->clock_skew) {
+ fabs(h_date - sp->wrk->exp.entered) < cache_param->clock_skew) {
/*
* If we have no Date: header or if it is
* sufficiently close to our clock we will
diff --git a/bin/varnishd/storage/stevedore.c b/bin/varnishd/storage/stevedore.c
index 4048836..b119c06 100644
--- a/bin/varnishd/storage/stevedore.c
+++ b/bin/varnishd/storage/stevedore.c
@@ -169,8 +169,8 @@ stv_alloc(struct worker *w, const struct object *obj, size_t size)
stv = obj->objstore->stevedore;
CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC);
- if (size > (size_t)(params->fetch_maxchunksize) << 10)
- size = (size_t)(params->fetch_maxchunksize) << 10;
+ if (size > (size_t)(cache_param->fetch_maxchunksize) << 10)
+ size = (size_t)(cache_param->fetch_maxchunksize) << 10;
for (;;) {
/* try to allocate from it */
@@ -179,7 +179,7 @@ stv_alloc(struct worker *w, const struct object *obj, size_t size)
if (st != NULL)
break;
- if (size > params->fetch_chunksize * 1024LL) {
+ if (size > cache_param->fetch_chunksize * 1024LL) {
size >>= 1;
continue;
}
@@ -189,7 +189,7 @@ stv_alloc(struct worker *w, const struct object *obj, size_t size)
break;
/* Enough is enough: try another if we have one */
- if (++fail >= params->nuke_limit)
+ if (++fail >= cache_param->nuke_limit)
break;
}
if (st != NULL)
@@ -336,7 +336,7 @@ STV_NewObject(struct sess *sp, const char *hint, unsigned wsl, struct exp *ep,
}
if (o == NULL) {
/* no luck; try to free some space and keep trying */
- for (i = 0; o == NULL && i < params->nuke_limit; i++) {
+ for (i = 0; o == NULL && i < cache_param->nuke_limit; i++) {
if (EXP_NukeOne(sp->wrk, stv->lru) == -1)
break;
o = stv->allocobj(stv, sp, ltot, &soc);
diff --git a/bin/varnishd/waiter/cache_waiter_epoll.c b/bin/varnishd/waiter/cache_waiter_epoll.c
index e700676..b5068a6 100644
--- a/bin/varnishd/waiter/cache_waiter_epoll.c
+++ b/bin/varnishd/waiter/cache_waiter_epoll.c
@@ -188,7 +188,7 @@ vwe_thread(void *priv)
continue;
/* check for timeouts */
- deadline = VTIM_real() - params->sess_timeout;
+ deadline = VTIM_real() - cache_param->sess_timeout;
for (;;) {
sp = VTAILQ_FIRST(&vwe->sesshead);
if (sp == NULL)
diff --git a/bin/varnishd/waiter/cache_waiter_kqueue.c b/bin/varnishd/waiter/cache_waiter_kqueue.c
index b300fae..a631606 100644
--- a/bin/varnishd/waiter/cache_waiter_kqueue.c
+++ b/bin/varnishd/waiter/cache_waiter_kqueue.c
@@ -186,7 +186,7 @@ vwk_thread(void *priv)
* would not know we meant "the old fd of this number".
*/
vwk_kq_flush(vwk);
- deadline = VTIM_real() - params->sess_timeout;
+ deadline = VTIM_real() - cache_param->sess_timeout;
for (;;) {
sp = VTAILQ_FIRST(&vwk->sesshead);
if (sp == NULL)
diff --git a/bin/varnishd/waiter/cache_waiter_poll.c b/bin/varnishd/waiter/cache_waiter_poll.c
index 2617365..5f8dbd7 100644
--- a/bin/varnishd/waiter/cache_waiter_poll.c
+++ b/bin/varnishd/waiter/cache_waiter_poll.c
@@ -140,7 +140,7 @@ vwp_main(void *priv)
assert(vwp->pollfd[vwp->pipes[1]].fd == -1);
v = poll(vwp->pollfd, vwp->hpoll + 1, 100);
assert(v >= 0);
- deadline = VTIM_real() - params->sess_timeout;
+ deadline = VTIM_real() - cache_param->sess_timeout;
v2 = v;
VTAILQ_FOREACH_SAFE(sp, &vwp->sesshead, list, sp2) {
if (v != 0 && v2 == 0)
diff --git a/bin/varnishd/waiter/cache_waiter_ports.c b/bin/varnishd/waiter/cache_waiter_ports.c
index 022131b..aa9f86d 100644
--- a/bin/varnishd/waiter/cache_waiter_ports.c
+++ b/bin/varnishd/waiter/cache_waiter_ports.c
@@ -197,7 +197,7 @@ vws_thread(void *priv)
/* check for timeouts */
now = VTIM_real();
- deadline = now - params->sess_timeout;
+ deadline = now - cache_param->sess_timeout;
/*
* This loop assumes that the oldest sessions are always at the
@@ -225,7 +225,7 @@ vws_thread(void *priv)
*/
if (sp) {
- double tmo = (sp->t_open + params->sess_timeout) - now;
+ double tmo = (sp->t_open + cache_param->sess_timeout) - now;
/* we should have removed all sps whose timeout has passed */
assert(tmo > 0.0);
More information about the varnish-commit
mailing list