[master] 6f7a1d564 shard: make `.reconfigure()` call optional
Nils Goroll
nils.goroll at uplex.de
Fri Oct 9 18:06:06 UTC 2020
commit 6f7a1d5640cdaef571ab5861e70d9b717a117190
Author: Nils Goroll <nils.goroll at uplex.de>
Date: Fri Oct 9 18:55:55 2020 +0200
shard: make `.reconfigure()` call optional
Making use of the PRIV_TASK `.free()` callback, we can finalize the
reconfiguration when the task ends, in case `.reconfigure()` has not
been called explicitly. o/ Dridi
Also turn `SLT_Error` `"(notice)"` logs into `SLT_Notice`.
diff --git a/bin/varnishtest/tests/d00015.vtc b/bin/varnishtest/tests/d00015.vtc
index 37899b829..0a5b0da57 100644
--- a/bin/varnishtest/tests/d00015.vtc
+++ b/bin/varnishtest/tests/d00015.vtc
@@ -182,12 +182,10 @@ varnish v1 -vcl+backend {
vd.add_backend(s2, "5");
vd.remove_backend(s1, "5");
vd.remove_backend(s1, "4");
- vd.add_backend(s3, "4");
- vd.add_backend(s3, "5");
- if (!vd.reconfigure(replicas=1)) {
- return(fail("reconfigure failed"));
- }
+ vd.add_backend(s3, "4a");
+ vd.add_backend(s3, "5a");
vd.set_warmup(0);
+ # implicit .reconfigure() - checked via backend.list
}
sub vcl_recv {
@@ -220,14 +218,14 @@ logexpect l1 -v v1 -g raw -d 1 {
expect 0 0 VCL_Log {^reconfigure failed}
expect 0 0 VCL_Log {^-- duplicate add$}
- expect 0 0 Error {^shard vd: .notice. backend s1 already exists - skipping$}
+ expect 0 0 Notice {^shard vd: backend s1 already exists - skipping$}
expect 0 0 Debug {^shard:.*point = 6e040182, host = 1}
expect 0 0 Debug {^shard:.*point = f08ad325, host = 0}
expect 0 0 VCL_Log {^-- duplicate add with idents$}
- expect 0 0 Error {^shard vd: .notice. backend s1 already exists - skipping}
- expect 0 0 Error {^shard vd: .notice. backend s1/s1_1 already exists - skipping}
- expect 0 0 Error {^shard vd: .notice. backend s1/s1_2 already exists - skipping}
+ expect 0 0 Notice {^shard vd: backend s1 already exists - skipping}
+ expect 0 0 Notice {^shard vd: backend s1/s1_1 already exists - skipping}
+ expect 0 0 Notice {^shard vd: backend s1/s1_2 already exists - skipping}
expect 0 0 Debug {^shard:.*point = 6e040182, host = 3}
expect 0 0 Debug {^shard:.*point = 732c7bbe, host = 2}
expect 0 0 Debug {^shard:.*point = bae80b0b, host = 1}
@@ -282,4 +280,4 @@ client c1 {
logexpect l1 -wait
-varnish v1 -cliexpect {(?s)Ident.*s3 +1 +h.*s3 +2 +h.*s3 +4 +h.*s3 +5 +h.*s3 +6 +h.*s3 +7 +h} "backend.list -p"
+varnish v1 -cliexpect {(?s)Ident.*s3 +1 +h.*s3 +2 +h.*s3 +4a +h.*s3 +5a +h.*s3 +6 +h.*s3 +7 +h} "backend.list -p"
diff --git a/bin/varnishtest/tests/d00016.vtc b/bin/varnishtest/tests/d00016.vtc
index 5452e0908..a6a8ed70a 100644
--- a/bin/varnishtest/tests/d00016.vtc
+++ b/bin/varnishtest/tests/d00016.vtc
@@ -1,6 +1,8 @@
varnishtest "shard director/int reconfiguration outside init"
server s1 {
+ rxreq
+ txresp -body "ech3Ooj"
} -start
server s2 {
@@ -9,6 +11,8 @@ server s2 {
server s3 {
rxreq
txresp -body "xiuFi3Pe"
+ rxreq
+ txresp -body "xiuFi3Pe"
} -start
varnish v1 -vcl+backend {
@@ -22,7 +26,17 @@ varnish v1 -vcl+backend {
}
sub vcl_recv {
-
+ if (req.http.change == "apply") {
+ # test implicit reconfigure at the end of task
+ vd.clear();
+ vd.add_backend(s1);
+ set req.backend_hint = vd.backend();
+ return (pass);
+ }
+ if (req.http.change == "test") {
+ set req.backend_hint = vd.backend();
+ return (pass);
+ }
std.log("-- invalid replicas");
if (!vd.reconfigure(replicas=0)) {
# continue intentionally
@@ -162,11 +176,11 @@ logexpect l1 -v v1 -g raw -d 1 {
expect 0 1001 VCL_Log {^reconfigure failed}
expect 0 1001 VCL_Log {^-- duplicate add$}
- expect 0 1001 Error {^shard vd: .notice. backend s1 already exists - skipping$}
+ expect 0 1001 Notice {^shard vd: backend s1 already exists - skipping$}
expect 0 1001 VCL_Log {^-- duplicate add with idents$}
- expect 0 1001 Error {^shard vd: .notice. backend s1 already exists - skipping}
- expect 0 1001 Error {^shard vd: .notice. backend s1/s1_1 already exists - skipping}
- expect 0 1001 Error {^shard vd: .notice. backend s1/s1_2 already exists - skipping}
+ expect 0 1001 Notice {^shard vd: backend s1 already exists - skipping}
+ expect 0 1001 Notice {^shard vd: backend s1/s1_1 already exists - skipping}
+ expect 0 1001 Notice {^shard vd: backend s1/s1_2 already exists - skipping}
expect 0 1001 VCL_Log {^-- remove s1_2 specifically$}
expect 0 1001 VCL_Log {^-- remove all instances of s1$}
expect 0 1001 VCL_Log {^-- re-add some - no 2nd director$}
@@ -220,6 +234,16 @@ client c1 {
txreq
rxresp
expect resp.body == "xiuFi3Pe"
+
+ # director not yet changed for this task
+ txreq -hdr "change: apply"
+ rxresp
+ expect resp.body == "xiuFi3Pe"
+
+ # change active
+ txreq -hdr "change: test"
+ rxresp
+ expect resp.body == "ech3Ooj"
} -run
logexpect l1 -wait
diff --git a/doc/changes.rst b/doc/changes.rst
index a2ab4f8b1..7c4bbba93 100644
--- a/doc/changes.rst
+++ b/doc/changes.rst
@@ -41,6 +41,13 @@ Varnish Cache Next (2021-03-15)
backends) of several instances without any special ordering
requirement.
+* Calling the shard director ``.reconfigure()`` method is now
+ optional. If not called explicitly, any shard director backend
+ changes are applied at the end of the current task.
+
+* Shard director ``Error`` log messages with ``(notice)`` have been
+ turned into ``Notice`` log messages.
+
================================
Varnish Cache 6.5.1 (2020-09-25)
================================
diff --git a/lib/libvmod_directors/shard_cfg.c b/lib/libvmod_directors/shard_cfg.c
index 6976b96bd..b61cb9392 100644
--- a/lib/libvmod_directors/shard_cfg.c
+++ b/lib/libvmod_directors/shard_cfg.c
@@ -62,7 +62,8 @@ struct shard_change_task {
struct shard_change {
unsigned magic;
#define SHARD_CHANGE_MAGIC 0xdff5c9a6
- const struct sharddir *shardd;
+ struct vsl_log *vsl;
+ struct sharddir *shardd;
VSTAILQ_HEAD(,shard_change_task) tasks;
};
@@ -73,6 +74,10 @@ struct backend_reconfig {
unsigned hole_i; // index hint on first hole
};
+/* forward decl */
+static VCL_BOOL
+change_reconfigure(struct shard_change *change, VCL_INT replicas);
+
/*
* ============================================================
* change / task list
@@ -81,8 +86,21 @@ struct backend_reconfig {
* a PRIV_TASK state, which we work in reconfigure.
*/
+static void v_matchproto_(vmod_priv_free_f)
+shard_change_fini(void * priv)
+{
+ struct shard_change *change;
+
+ if (priv == NULL)
+ return;
+
+ CAST_OBJ_NOTNULL(change, priv, SHARD_CHANGE_MAGIC);
+
+ (void) change_reconfigure(change, 67);
+}
+
static struct shard_change *
-shard_change_get(VRT_CTX, const struct sharddir * const shardd)
+shard_change_get(VRT_CTX, struct sharddir * const shardd)
{
struct vmod_priv *task;
struct shard_change *change;
@@ -94,6 +112,7 @@ shard_change_get(VRT_CTX, const struct sharddir * const shardd)
if (task->priv != NULL) {
CAST_OBJ_NOTNULL(change, task->priv, SHARD_CHANGE_MAGIC);
+ assert (change->vsl == ctx->vsl);
assert (change->shardd == shardd);
return (change);
}
@@ -105,9 +124,11 @@ shard_change_get(VRT_CTX, const struct sharddir * const shardd)
}
INIT_OBJ(change, SHARD_CHANGE_MAGIC);
+ change->vsl = ctx->vsl;
change->shardd = shardd;
VSTAILQ_INIT(&change->tasks);
task->priv = change;
+ task->free = shard_change_fini;
return (change);
}
@@ -142,7 +163,7 @@ shard_change_task_add(VRT_CTX, struct shard_change *change,
}
static inline struct shard_change_task *
-shard_change_task_backend(VRT_CTX, const struct sharddir *shardd,
+shard_change_task_backend(VRT_CTX, struct sharddir *shardd,
enum shard_change_task_e task_e, VCL_BACKEND be, VCL_STRING ident,
VCL_DURATION rampup)
{
@@ -174,7 +195,7 @@ shard_change_task_backend(VRT_CTX, const struct sharddir *shardd,
* director reconfiguration tasks
*/
VCL_BOOL
-shardcfg_add_backend(VRT_CTX, const struct sharddir *shardd,
+shardcfg_add_backend(VRT_CTX, struct sharddir *shardd,
VCL_BACKEND be, VCL_STRING ident, VCL_DURATION rampup, VCL_REAL weight)
{
struct shard_change_task *task;
@@ -193,7 +214,7 @@ shardcfg_add_backend(VRT_CTX, const struct sharddir *shardd,
}
VCL_BOOL
-shardcfg_remove_backend(VRT_CTX, const struct sharddir *shardd,
+shardcfg_remove_backend(VRT_CTX, struct sharddir *shardd,
VCL_BACKEND be, VCL_STRING ident)
{
return (shard_change_task_backend(ctx, shardd, REMOVE_BE,
@@ -201,7 +222,7 @@ shardcfg_remove_backend(VRT_CTX, const struct sharddir *shardd,
}
VCL_BOOL
-shardcfg_clear(VRT_CTX, const struct sharddir *shardd)
+shardcfg_clear(VRT_CTX, struct sharddir *shardd)
{
struct shard_change *change;
@@ -510,7 +531,7 @@ shardcfg_backend_finalize(struct backend_reconfig *re)
*/
static void
-shardcfg_apply_change(VRT_CTX, struct sharddir *shardd,
+shardcfg_apply_change(struct vsl_log *vsl, struct sharddir *shardd,
const struct shard_change *change, VCL_INT replicas)
{
struct shard_change_task *task, *clear;
@@ -577,8 +598,8 @@ shardcfg_apply_change(VRT_CTX, struct sharddir *shardd,
const char * const ident = b->ident;
- shard_err(ctx, shardd, "(notice) backend %s%s%s "
- "already exists - skipping",
+ sharddir_err(vsl, SLT_Notice, "shard %s: backend %s%s%s "
+ "already exists - skipping", shardd->name,
VRT_BACKEND_string(b->backend),
ident ? "/" : "",
ident ? ident : "");
@@ -598,28 +619,22 @@ shardcfg_apply_change(VRT_CTX, struct sharddir *shardd,
* top reconfiguration function
*/
-VCL_BOOL
-shardcfg_reconfigure(VRT_CTX, struct sharddir *shardd, VCL_INT replicas)
+static VCL_BOOL
+change_reconfigure(struct shard_change *change, VCL_INT replicas)
{
- struct shard_change *change;
+ struct sharddir *shardd;
+ CHECK_OBJ_NOTNULL(change, SHARD_CHANGE_MAGIC);
+ assert (replicas > 0);
+ shardd = change->shardd;
CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
- if (replicas <= 0) {
- shard_err(ctx, shardd,
- ".reconfigure() invalid replicas argument %ld", replicas);
- return (0);
- }
-
- change = shard_change_get(ctx, shardd);
- if (change == NULL)
- return (0);
if (VSTAILQ_FIRST(&change->tasks) == NULL)
return (1);
sharddir_wrlock(shardd);
- shardcfg_apply_change(ctx, shardd, change, replicas);
+ shardcfg_apply_change(change->vsl, shardd, change, replicas);
shard_change_finish(change);
if (shardd->hashcircle)
@@ -627,7 +642,8 @@ shardcfg_reconfigure(VRT_CTX, struct sharddir *shardd, VCL_INT replicas)
shardd->hashcircle = NULL;
if (shardd->n_backend == 0) {
- shard_err0(ctx, shardd, ".reconfigure() no backends");
+ sharddir_err(change->vsl, SLT_Error, "shard %s: .reconfigure() "
+ "no backends", shardd->name);
sharddir_unlock(shardd);
return (0);
}
@@ -637,6 +653,26 @@ shardcfg_reconfigure(VRT_CTX, struct sharddir *shardd, VCL_INT replicas)
return (1);
}
+VCL_BOOL
+shardcfg_reconfigure(VRT_CTX, struct sharddir *shardd, VCL_INT replicas)
+{
+ struct shard_change *change;
+
+ CHECK_OBJ_NOTNULL(shardd, SHARDDIR_MAGIC);
+ if (replicas <= 0) {
+ sharddir_err(ctx->vsl, SLT_Error,
+ "shard %s: .reconfigure() invalid replicas argument %ld",
+ shardd->name, replicas);
+ return (0);
+ }
+
+ change = shard_change_get(ctx, shardd);
+ if (change == NULL)
+ return (0);
+
+ return (change_reconfigure(change, replicas));
+}
+
/*
* ============================================================
* misc config related
diff --git a/lib/libvmod_directors/shard_cfg.h b/lib/libvmod_directors/shard_cfg.h
index 5c1dc5c20..87a6f69e4 100644
--- a/lib/libvmod_directors/shard_cfg.h
+++ b/lib/libvmod_directors/shard_cfg.h
@@ -28,11 +28,11 @@
* SUCH DAMAGE.
*/
-VCL_BOOL shardcfg_add_backend(VRT_CTX, const struct sharddir *shardd,
+VCL_BOOL shardcfg_add_backend(VRT_CTX, struct sharddir *shardd,
VCL_BACKEND be, VCL_STRING ident, VCL_DURATION rampup, VCL_REAL weight);
-VCL_BOOL shardcfg_remove_backend(VRT_CTX, const struct sharddir *shardd,
+VCL_BOOL shardcfg_remove_backend(VRT_CTX, struct sharddir *shardd,
VCL_BACKEND be, VCL_STRING ident);
-VCL_BOOL shardcfg_clear(VRT_CTX, const struct sharddir *shardd);
-VCL_BOOL shardcfg_reconfigure(VRT_CTX, struct sharddir *shardd, VCL_INT);
+VCL_BOOL shardcfg_clear(VRT_CTX, struct sharddir *shardd);
+VCL_BOOL shardcfg_reconfigure(VRT_CTX, struct sharddir *, VCL_INT);
VCL_VOID shardcfg_set_warmup(struct sharddir *shardd, VCL_REAL ratio);
VCL_VOID shardcfg_set_rampup(struct sharddir *shardd, VCL_DURATION duration);
diff --git a/lib/libvmod_directors/shard_dir.c b/lib/libvmod_directors/shard_dir.c
index 809fd67a5..1d4419b66 100644
--- a/lib/libvmod_directors/shard_dir.c
+++ b/lib/libvmod_directors/shard_dir.c
@@ -78,13 +78,13 @@ sharddir_debug(struct sharddir *shardd, const uint32_t flags)
}
void
-sharddir_err(VRT_CTX, enum VSL_tag_e tag, const char *fmt, ...)
+sharddir_err(struct vsl_log *vsl, enum VSL_tag_e tag, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
- if (ctx->vsl)
- VSLbv(ctx->vsl, tag, fmt, ap);
+ if (vsl != NULL)
+ VSLbv(vsl, tag, fmt, ap);
else
VSLv(tag, 0, fmt, ap);
va_end(ap);
diff --git a/lib/libvmod_directors/shard_dir.h b/lib/libvmod_directors/shard_dir.h
index c2c8ddeab..27c0aee60 100644
--- a/lib/libvmod_directors/shard_dir.h
+++ b/lib/libvmod_directors/shard_dir.h
@@ -94,18 +94,18 @@ sharddir_backend(const struct sharddir *shardd, unsigned id)
#define shard_err(ctx, shardd, fmt, ...) \
do { \
- sharddir_err(ctx, SLT_Error, "shard %s: " fmt, \
+ sharddir_err((ctx)->vsl, SLT_Error, "shard %s: " fmt, \
(shardd)->name, __VA_ARGS__); \
} while (0)
#define shard_err0(ctx, shardd, msg) \
do { \
- sharddir_err(ctx, SLT_Error, "shard %s: %s", \
+ sharddir_err((ctx)->vsl, SLT_Error, "shard %s: %s", \
(shardd)->name, (msg)); \
} while (0)
void sharddir_debug(struct sharddir *shardd, const uint32_t flags);
-void sharddir_err(VRT_CTX, enum VSL_tag_e tag, const char *fmt, ...);
+void sharddir_err(struct vsl_log *, enum VSL_tag_e tag, const char *fmt, ...);
void sharddir_new(struct sharddir **sharddp, const char *vcl_name,
const struct vmod_directors_shard_param *param);
void sharddir_set_param(struct sharddir *shardd,
diff --git a/lib/libvmod_directors/vmod_directors.vcc b/lib/libvmod_directors/vmod_directors.vcc
index 542330cc8..161841531 100644
--- a/lib/libvmod_directors/vmod_directors.vcc
+++ b/lib/libvmod_directors/vmod_directors.vcc
@@ -3,7 +3,7 @@
# itself. See LICENCE for details.
#
# Copyright (c) 2013-2015 Varnish Software AS
-# Copyright 2009-2018 UPLEX - Nils Goroll Systemoptimierung
+# Copyright 2009-2020 UPLEX - Nils Goroll Systemoptimierung
# All rights reserved.
#
# Authors: Poul-Henning Kamp <phk at FreeBSD.org>
@@ -234,14 +234,6 @@ $Object shard()
Create a shard director.
-Note that the shard director needs to be configured using at least one
-`xshard.add_backend()`_ call(s) **followed by a**
-`xshard.reconfigure()`_ **call** before it can hand out
-backends.
-
-_Note_ that due to various restrictions (documented below), it is
-recommended to use the shard director on the backend side.
-
Introduction
````````````
@@ -315,12 +307,14 @@ The drawbacks are:
Method
``````
-When `xshard.reconfigure()`_ is called, a consistent
-hashing circular data structure gets built from the last 32 bits of
-SHA256 hash values of *<ident>*\ *<n>* (default *ident* being the
-backend name) for each backend and for a running number *n* from 1 to
-*replicas*. Hashing creates the seemingly random order for placement
-of backends on the consistent hashing ring. When
+When `xshard.reconfigure()`_ is called explicitly (or implicitly at
+the end of any task containing reconfigurations like
+`xshard.add_backend()`_), a consistent hashing circular data structure
+gets built from the last 32 bits of SHA256 hash values of *<ident>*\
+*<n>* (default *ident* being the backend name) for each backend and
+for a running number *n* from 1 to the *replicas* argument to
+`xshard.reconfigure()`_. Hashing creates the seemingly random order
+for placement of backends on the consistent hashing ring. When
`xshard.add_backend()`_ was called with a *weight* argument,
*replicas* is scaled by that weight to add proportionally more copies
of the that backend on the ring.
@@ -352,6 +346,11 @@ when configuring the shard director, you are advised to check::
varnishlog -I Error:^shard
+Additional information may be provided as Notices, which can be
+checked using
+
+ varnishlog -I Notice:^shard
+
$Method VOID .set_warmup(REAL probability=0.0)
Set the default warmup probability. See the *warmup* parameter of
@@ -398,31 +397,26 @@ at least 1. Values above 10 probably do not make much sense. The
effect of *weight* is also capped such that the total number of
replicas does not exceed `UINT32_MAX`.
-NOTE: Backend changes need to be finalized with
-`xshard.reconfigure()`_.
-
$Method BOOL .remove_backend([BACKEND backend=0], [STRING ident=0])
Remove backend(s) from the director. Either *backend* or *ident* must
be specified. *ident* removes a specific instance. If *backend* is
given without *ident*, all instances of this backend are removed.
-NOTE: Backend changes need to be finalized with
-`xshard.reconfigure()`_.
-
$Method BOOL .clear()
Remove all backends from the director.
-NOTE: Backend changes need to be finalized with
-`xshard.reconfigure()`_.
-
$Method BOOL .reconfigure(INT replicas=67)
-Reconfigure the consistent hashing ring to reflect backend changes.
+.. !! mirror changes to shard_cfg.c: shard_change_fini()
+
+Explicitly reconfigure the consistent hashing ring to reflect backend
+changes to become effective immediately.
-This method must be called at least once before the director can be
-used.
+If this method is not called explicitly, reconfiguration happens at
+the end of the current task (after ``vcl_init {}`` or when the current
+client or backend task is finished).
$Method INT .key(STRANDS)
diff --git a/lib/libvmod_directors/vmod_shard.c b/lib/libvmod_directors/vmod_shard.c
index 18c3465ce..42dbcaf85 100644
--- a/lib/libvmod_directors/vmod_shard.c
+++ b/lib/libvmod_directors/vmod_shard.c
@@ -540,7 +540,7 @@ shard_param_args(VRT_CTX,
}
if (key_blob == NULL || key_blob->len == 0 ||
key_blob->blob == NULL) {
- sharddir_err(ctx, SLT_Error, "%s %s: "
+ sharddir_err(ctx->vsl, SLT_Error, "%s %s: "
"by=BLOB but no or empty key_blob "
"- using key 0",
who, p->vcl_name);
More information about the varnish-commit
mailing list