[master] c2d45aefb Try to send VDP_END with the last bytes
Dridi Boukelmoune
dridi.boukelmoune at gmail.com
Fri Jan 15 15:04:07 UTC 2021
commit c2d45aefb563bb105f524b67b84d20649ecb85a2
Author: Nils Goroll <nils.goroll at uplex.de>
Date: Mon Apr 27 12:55:14 2020 +0200
Try to send VDP_END with the last bytes
Follow-up to #3125:
That PR added VDP_END, but always issued another VDP_bytes() call with
no data.
We now make it the responsibility of the object iterator to send
VDP_END and opportunistically try to issue it with the last bytes in
order to avoid the additional VDP_bytes() call with no data.
As noted by Martin in
https://github.com/varnishcache/varnish-cache/pull/3125#issuecomment-600620905
the advantage is that "in H/2 there is the need to send a frame with a
last bit set to mark the end of the stream" and in H/1 we could generate
the end chunk right after the last bit of data.
This change does not implement these optimizations yet, but lays the
groundwork for them.
diff --git a/bin/varnishd/cache/cache_deliver_proc.c b/bin/varnishd/cache/cache_deliver_proc.c
index 7dbe03a0a..d4a074bdc 100644
--- a/bin/varnishd/cache/cache_deliver_proc.c
+++ b/bin/varnishd/cache/cache_deliver_proc.c
@@ -217,8 +217,16 @@ VDP_Close(struct vdp_ctx *vdc)
static int v_matchproto_(objiterate_f)
vdp_objiterator(void *priv, unsigned flush, const void *ptr, ssize_t len)
{
+ enum vdp_action act;
- return (VDP_bytes(priv, flush ? VDP_FLUSH : VDP_NULL, ptr, len));
+ if (flush == 0)
+ act = VDP_NULL;
+ else if ((flush & OBJ_ITER_END) != 0)
+ act = VDP_END;
+ else
+ act = VDP_FLUSH;
+
+ return (VDP_bytes(priv, act, ptr, len));
}
@@ -234,8 +242,6 @@ VDP_DeliverObj(struct vdp_ctx *vdc, struct objcore *oc)
vdc->req = NULL;
final = oc->flags & (OC_F_PRIVATE | OC_F_HFM | OC_F_HFP) ? 1 : 0;
r = ObjIterate(vdc->wrk, oc, vdc, vdp_objiterator, final);
- if (r == 0)
- r = VDP_bytes(vdc, VDP_END, NULL, 0);
if (r < 0)
return (r);
return (0);
diff --git a/bin/varnishd/cache/cache_esi_deliver.c b/bin/varnishd/cache/cache_esi_deliver.c
index a1a5afe04..bf75ef217 100644
--- a/bin/varnishd/cache/cache_esi_deliver.c
+++ b/bin/varnishd/cache/cache_esi_deliver.c
@@ -315,6 +315,9 @@ ved_vdp_esi_bytes(struct vdp_ctx *vdx, enum vdp_action act, void **priv,
struct ecx *ecx;
int retval = 0;
+ if (act == VDP_END)
+ act = VDP_FLUSH;
+
AN(priv);
CHECK_OBJ_NOTNULL(vdx, VDP_CTX_MAGIC);
CAST_OBJ_NOTNULL(ecx, *priv, ECX_MAGIC);
@@ -432,7 +435,6 @@ ved_vdp_esi_bytes(struct vdp_ctx *vdx, enum vdp_action act, void **priv,
return (retval);
case 3:
case 4:
- assert(act != VDP_END);
/*
* There is no guarantee that the 'l' bytes are all
* in the same storage segment, so loop over storage
diff --git a/bin/varnishd/storage/storage_simple.c b/bin/varnishd/storage/storage_simple.c
index ccba6b525..6bfb1f8f6 100644
--- a/bin/varnishd/storage/storage_simple.c
+++ b/bin/varnishd/storage/storage_simple.c
@@ -278,6 +278,7 @@ sml_iterator(struct worker *wrk, struct objcore *oc,
p = NULL;
l = 0;
+ u = 0;
while (1) {
ol = len;
nl = ObjWaitExtend(wrk, oc, ol);
@@ -333,11 +334,15 @@ sml_iterator(struct worker *wrk, struct objcore *oc,
u = 0;
if (st == NULL || final)
u |= OBJ_ITER_FLUSH;
+ if (st == NULL && boc->state == BOS_FINISHED)
+ u |= OBJ_ITER_END;
ret = func(priv, u, p, l);
if (ret)
break;
}
HSH_DerefBoc(wrk, oc);
+ if ((u & OBJ_ITER_END) == 0)
+ func(priv, OBJ_ITER_END, NULL, 0);
return (ret);
}
More information about the varnish-commit
mailing list