From dridi.boukelmoune at gmail.com Mon Dec 5 10:34:05 2022 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Mon, 5 Dec 2022 10:34:05 +0000 (UTC) Subject: [master] 0dcdb5076 man: Weak Last-Modified headers are ignored Message-ID: <20221205103405.B82AFB26DD@lists.varnish-cache.org> commit 0dcdb5076e08f47b47004d178f564616b6677e1d Author: Dridi Boukelmoune Date: Mon Nov 28 22:52:14 2022 +0100 man: Weak Last-Modified headers are ignored diff --git a/doc/sphinx/reference/vcl_var.rst b/doc/sphinx/reference/vcl_var.rst index 361801fe0..41bbe98db 100644 --- a/doc/sphinx/reference/vcl_var.rst +++ b/doc/sphinx/reference/vcl_var.rst @@ -1396,8 +1396,8 @@ resp.status 304 is sent. Secondly, ``req.http.If-Modified-Since`` is compared against - ``resp.http.Last-Modified`` or, if it is unset, against the - point in time when the object was last modified based on the + ``resp.http.Last-Modified`` or, if it is unset or weak, against + the point in time when the object was last modified based on the ``Date`` and ``Age`` headers received with the backend response which created the object. If the object has not been modified based on that comparison, a 304 is sent. From dridi.boukelmoune at gmail.com Mon Dec 5 10:34:05 2022 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Mon, 5 Dec 2022 10:34:05 +0000 (UTC) Subject: [master] 4e4d66b33 acceptor: Address false positive in Coverity Scan Message-ID: <20221205103405.DC12CB26E0@lists.varnish-cache.org> commit 4e4d66b33ee79217808936f9b6d0796456e2ab7f Author: Dridi Boukelmoune Date: Mon Dec 5 11:26:51 2022 +0100 acceptor: Address false positive in Coverity Scan Upon success we expect a non-null pointer, and to make sure this is not residual uninitialized stack memory, it is null'd beforehand. /bin/varnishd/cache/cache_acceptor.c: 375 in vca_mk_tcp() 369 { 370 struct suckaddr *sa; 371 ssize_t sz; 372 373 AN(SES_Reserve_remote_addr(sp, &sa, &sz)); 374 assert(sz == vsa_suckaddr_len); >>> CID 1517259: Resource leaks (RESOURCE_LEAK) >>> Failing to save or free storage allocated by "VSA_Build(sa, &wa->acceptaddr, wa->acceptaddrlen)" leaks it. 375 AN(VSA_Build(sa, &wa->acceptaddr, wa->acceptaddrlen)); 376 sp->sattr[SA_CLIENT_ADDR] = sp->sattr[SA_REMOTE_ADDR]; It is precisely because we reserve the space to store the built suckaddr that there won't be a malloc() call made behind our back. For some reason, it only complained about one call site. 
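The pattern being defended here is caller-reserved storage: SES_Reserve_remote_addr() hands back a pointer into space the session already owns, and VSA_Build() only fills that space in, so there is nothing for the caller to free. A minimal stand-alone sketch of that shape, using hypothetical reserve_slot()/build_in_place() stand-ins rather than the real Varnish API:

    #include <assert.h>
    #include <stddef.h>
    #include <string.h>

    /* Fixed arena standing in for the session's attribute storage. */
    static char arena[64];

    /* Hand out a pointer into caller-owned storage; no heap allocation. */
    static int
    reserve_slot(void **p, size_t *sz)
    {
        *p = arena;
        *sz = sizeof arena;
        return (1);
    }

    /* "Build" an object in the storage it was given, return that pointer. */
    static void *
    build_in_place(void *dst, const void *src, size_t len)
    {
        return (memcpy(dst, src, len));
    }

    int
    main(void)
    {
        void *slot = NULL;    /* null'd up front, as in the commit */
        size_t sz;

        assert(reserve_slot(&slot, &sz));
        assert(slot != NULL);                /* the added AN(sa) */
        assert(build_in_place(slot, "proxy", 6) == slot);
        /* Nothing to free: the object lives entirely in the reserved slot. */
        return (0);
    }

Because build_in_place() returns the reserved slot itself, "failing to save or free" its return value leaks nothing, which is the false positive the NULL initialization and the extra assertion are meant to quiet.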
diff --git a/bin/varnishd/cache/cache_acceptor.c b/bin/varnishd/cache/cache_acceptor.c index 67947114b..b25e186f0 100644 --- a/bin/varnishd/cache/cache_acceptor.c +++ b/bin/varnishd/cache/cache_acceptor.c @@ -367,10 +367,11 @@ static void vca_mk_tcp(const struct wrk_accept *wa, struct sess *sp, char *laddr, char *lport, char *raddr, char *rport) { - struct suckaddr *sa; + struct suckaddr *sa = NULL; ssize_t sz; AN(SES_Reserve_remote_addr(sp, &sa, &sz)); + AN(sa); assert(sz == vsa_suckaddr_len); AN(VSA_Build(sa, &wa->acceptaddr, wa->acceptaddrlen)); sp->sattr[SA_CLIENT_ADDR] = sp->sattr[SA_REMOTE_ADDR]; @@ -390,11 +391,12 @@ static void vca_mk_uds(struct wrk_accept *wa, struct sess *sp, char *laddr, char *lport, char *raddr, char *rport) { - struct suckaddr *sa; + struct suckaddr *sa = NULL; ssize_t sz; (void) wa; AN(SES_Reserve_remote_addr(sp, &sa, &sz)); + AN(sa); assert(sz == vsa_suckaddr_len); AZ(SES_Set_remote_addr(sp, bogo_ip)); sp->sattr[SA_CLIENT_ADDR] = sp->sattr[SA_REMOTE_ADDR]; From phk at FreeBSD.org Mon Dec 5 10:34:05 2022 From: phk at FreeBSD.org (Poul-Henning Kamp) Date: Mon, 5 Dec 2022 10:34:05 +0000 (UTC) Subject: [master] 010eef94b Use uint64_t instead of uintptr_t, no pointers are ever stored here.. Message-ID: <20221205103406.05A20B26E3@lists.varnish-cache.org> commit 010eef94bec582f8ba28eb7116ed59ecd187ebe9 Author: Poul-Henning Kamp Date: Mon Dec 5 10:32:38 2022 +0000 Use uint64_t instead of uintptr_t, no pointers are ever stored here.. diff --git a/bin/varnishd/cache/cache.h b/bin/varnishd/cache/cache.h index 0a3f9e092..f1d107c15 100644 --- a/bin/varnishd/cache/cache.h +++ b/bin/varnishd/cache/cache.h @@ -271,7 +271,7 @@ struct worker { struct storeobj { const struct stevedore *stevedore; void *priv; - uintptr_t priv2; + uint64_t priv2; }; /* Busy Objcore structure -------------------------------------------- From dridi at varni.sh Mon Dec 5 10:40:58 2022 From: dridi at varni.sh (Dridi Boukelmoune) Date: Mon, 5 Dec 2022 10:40:58 +0000 Subject: [master] 4e4d66b33 acceptor: Address false positive in Coverity Scan In-Reply-To: <20221205103405.DC12CB26E0@lists.varnish-cache.org> References: <20221205103405.DC12CB26E0@lists.varnish-cache.org> Message-ID: > For some reason, it only complained about one call site. Because there's only one call site... From dridi.boukelmoune at gmail.com Mon Dec 5 10:45:06 2022 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Mon, 5 Dec 2022 10:45:06 +0000 (UTC) Subject: [master] c0481916e http1: Emit ReqStart on bad requests Message-ID: <20221205104506.E58BFBE227@lists.varnish-cache.org> commit c0481916ede75035a61a74221eb06cf27db44c36 Author: Dridi Boukelmoune Date: Tue Aug 30 16:24:34 2022 +0200 http1: Emit ReqStart on bad requests It makes no sense to emit this just before we end the transaction, except to help varnishncsa find the client IP address. 
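For context on why the record helps: ReqStart carries three space-separated fields (client IP, client port, listener endpoint name), so a consumer in the varnishncsa mould only has to split the payload to recover the client address even when the transaction dies at parse time. A rough, self-contained sketch of that split over a hard-coded sample record, assuming the three-field payload the patch below emits and no real VSL API:

    #include <stdio.h>

    int
    main(void)
    {
        /* Sample payload in the "ip port endpoint" shape. */
        const char *reqstart = "192.0.2.7 54321 a0";
        char ip[46], port[6], endpoint[32];

        if (sscanf(reqstart, "%45s %5s %31s", ip, port, endpoint) == 3)
            printf("client %s:%s via %s\n", ip, port, endpoint);
        return (0);
    }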
diff --git a/bin/varnishd/http1/cache_http1_fsm.c b/bin/varnishd/http1/cache_http1_fsm.c index 0d23af1de..4a3012551 100644 --- a/bin/varnishd/http1/cache_http1_fsm.c +++ b/bin/varnishd/http1/cache_http1_fsm.c @@ -44,6 +44,7 @@ #include "cache/cache_transport.h" #include "cache_http1.h" +#include "common/heritage.h" #include "vtcp.h" static const char H1NEWREQ[] = "HTTP1::NewReq"; @@ -245,6 +246,7 @@ http1_abort(struct req *req, uint16_t status) static int http1_dissect(struct worker *wrk, struct req *req) { + const char *ci, *cp, *endpname; CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); CHECK_OBJ_NOTNULL(req, REQ_MAGIC); @@ -271,6 +273,15 @@ http1_dissect(struct worker *wrk, struct req *req) (int)(req->htc->rxbuf_e - req->htc->rxbuf_b), req->htc->rxbuf_b); wrk->stats->client_req_400++; + + ci = SES_Get_String_Attr(req->sp, SA_CLIENT_IP); + cp = SES_Get_String_Attr(req->sp, SA_CLIENT_PORT); + CHECK_OBJ_NOTNULL(req->sp->listen_sock, LISTEN_SOCK_MAGIC); + endpname = req->sp->listen_sock->name; + AN(endpname); + + VSLb(req->vsl, SLT_ReqStart, "%s %s %s", ci, cp, endpname); + req->doclose = SC_RX_JUNK; http1_abort(req, 400); return (-1); diff --git a/bin/varnishtest/tests/u00003.vtc b/bin/varnishtest/tests/u00003.vtc index 65aadee41..0cdbdd45e 100644 --- a/bin/varnishtest/tests/u00003.vtc +++ b/bin/varnishtest/tests/u00003.vtc @@ -45,7 +45,7 @@ delay 1 shell "mv ${tmpdir}/ncsa.log ${tmpdir}/ncsa.old.log" shell "kill -HUP `cat ${tmpdir}/ncsa.pid`" -client c1 { +client c2 { txreq -url /2 rxresp } -run @@ -168,4 +168,18 @@ delay 1 process p2 -expect-exit 1 -kill INT -wait shell {grep -q "VSM: Attach interrupted" ${p2_err}} +# IP address for bad requests + +client c3 { + txreq -url "/bad path" + rxresp + expect resp.status == 400 +} -run + +varnish v1 -vsl_catchup + +shell -expect ${localhost} { + varnishncsa -n ${v1_name} -d -q 'RespStatus == 400' +} + # ESI coverage in e00003.vtc From dridi.boukelmoune at gmail.com Mon Dec 5 10:45:07 2022 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Mon, 5 Dec 2022 10:45:07 +0000 (UTC) Subject: [master] 656416a16 req: Centralize SLT_ReqStart logic Message-ID: <20221205104507.26A2BBE230@lists.varnish-cache.org> commit 656416a16d6a19669eefae2edd5be17dc53fa578 Author: Dridi Boukelmoune Date: Mon Dec 5 11:09:36 2022 +0100 req: Centralize SLT_ReqStart logic diff --git a/bin/varnishd/cache/cache_req.c b/bin/varnishd/cache/cache_req.c index 46166956d..46b183140 100644 --- a/bin/varnishd/cache/cache_req.c +++ b/bin/varnishd/cache/cache_req.c @@ -34,15 +34,15 @@ #include "config.h" -#include "cache_varnishd.h" -#include "cache_filter.h" - #include #include +#include "cache_varnishd.h" +#include "cache_filter.h" #include "cache_pool.h" #include "cache_transport.h" +#include "common/heritage.h" #include "vtim.h" void @@ -99,6 +99,25 @@ Req_LogHit(struct worker *wrk, struct req *req, struct objcore *oc, } } +const char * +Req_LogStart(struct worker *wrk, struct req *req) +{ + const char *ci, *cp, *endpname; + + CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); + CHECK_OBJ_NOTNULL(req, REQ_MAGIC); + CHECK_OBJ_NOTNULL(req->sp, SESS_MAGIC); + + ci = SES_Get_String_Attr(req->sp, SA_CLIENT_IP); + cp = SES_Get_String_Attr(req->sp, SA_CLIENT_PORT); + CHECK_OBJ_NOTNULL(req->sp->listen_sock, LISTEN_SOCK_MAGIC); + endpname = req->sp->listen_sock->name; + AN(endpname); + VSLb(req->vsl, SLT_ReqStart, "%s %s %s", ci, cp, endpname); + + return (ci); +} + /*-------------------------------------------------------------------- * Alloc/Free a request */ diff --git 
a/bin/varnishd/cache/cache_req_fsm.c b/bin/varnishd/cache/cache_req_fsm.c index c3dbe311f..ac1a66a5d 100644 --- a/bin/varnishd/cache/cache_req_fsm.c +++ b/bin/varnishd/cache/cache_req_fsm.c @@ -49,7 +49,6 @@ #include "hash/hash_slinger.h" #include "http1/cache_http1.h" #include "storage/storage.h" -#include "common/heritage.h" #include "vcl.h" #include "vct.h" #include "vsha256.h" @@ -909,7 +908,7 @@ cnt_recv(struct worker *wrk, struct req *req) { unsigned recv_handling; struct VSHA256Context sha256ctx; - const char *ci, *cp, *endpname; + const char *ci; CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); CHECK_OBJ_NOTNULL(req, REQ_MAGIC); @@ -922,13 +921,7 @@ cnt_recv(struct worker *wrk, struct req *req) AZ(isnan(req->t_prev)); AZ(isnan(req->t_req)); - ci = SES_Get_String_Attr(req->sp, SA_CLIENT_IP); - cp = SES_Get_String_Attr(req->sp, SA_CLIENT_PORT); - CHECK_OBJ_NOTNULL(req->sp->listen_sock, LISTEN_SOCK_MAGIC); - endpname = req->sp->listen_sock->name; - AN(endpname); - VSLb(req->vsl, SLT_ReqStart, "%s %s %s", ci, cp, endpname); - + ci = Req_LogStart(wrk, req); http_VSL_log(req->http); if (http_CountHdr(req->http0, H_Host) > 1) { diff --git a/bin/varnishd/cache/cache_varnishd.h b/bin/varnishd/cache/cache_varnishd.h index b90affc6a..cf397dd81 100644 --- a/bin/varnishd/cache/cache_varnishd.h +++ b/bin/varnishd/cache/cache_varnishd.h @@ -404,6 +404,7 @@ void Req_Cleanup(struct sess *sp, struct worker *wrk, struct req *req); void Req_Fail(struct req *req, stream_close_t reason); void Req_AcctLogCharge(struct VSC_main_wrk *, struct req *); void Req_LogHit(struct worker *, struct req *, struct objcore *, intmax_t); +const char *Req_LogStart(struct worker *, struct req *); /* cache_req_body.c */ int VRB_Ignore(struct req *); diff --git a/bin/varnishd/http1/cache_http1_fsm.c b/bin/varnishd/http1/cache_http1_fsm.c index 4a3012551..cc3420362 100644 --- a/bin/varnishd/http1/cache_http1_fsm.c +++ b/bin/varnishd/http1/cache_http1_fsm.c @@ -35,16 +35,14 @@ #include "config.h" -#include "cache/cache_varnishd.h" - #include #include +#include "cache/cache_varnishd.h" #include "cache/cache_objhead.h" #include "cache/cache_transport.h" #include "cache_http1.h" -#include "common/heritage.h" #include "vtcp.h" static const char H1NEWREQ[] = "HTTP1::NewReq"; @@ -246,7 +244,6 @@ http1_abort(struct req *req, uint16_t status) static int http1_dissect(struct worker *wrk, struct req *req) { - const char *ci, *cp, *endpname; CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); CHECK_OBJ_NOTNULL(req, REQ_MAGIC); @@ -274,13 +271,7 @@ http1_dissect(struct worker *wrk, struct req *req) req->htc->rxbuf_b); wrk->stats->client_req_400++; - ci = SES_Get_String_Attr(req->sp, SA_CLIENT_IP); - cp = SES_Get_String_Attr(req->sp, SA_CLIENT_PORT); - CHECK_OBJ_NOTNULL(req->sp->listen_sock, LISTEN_SOCK_MAGIC); - endpname = req->sp->listen_sock->name; - AN(endpname); - - VSLb(req->vsl, SLT_ReqStart, "%s %s %s", ci, cp, endpname); + (void)Req_LogStart(wrk, req); req->doclose = SC_RX_JUNK; http1_abort(req, 400); From phk at FreeBSD.org Mon Dec 5 15:41:07 2022 From: phk at FreeBSD.org (Poul-Henning Kamp) Date: Mon, 5 Dec 2022 15:41:07 +0000 (UTC) Subject: [master] 772c127f6 Constify Message-ID: <20221205154107.E02B690AF@lists.varnish-cache.org> commit 772c127f6ef100664b7c95ece3789b9c3e92a797 Author: Poul-Henning Kamp Date: Mon Dec 5 14:53:48 2022 +0000 Constify diff --git a/bin/varnishd/cache/cache_req.c b/bin/varnishd/cache/cache_req.c index 46b183140..113f3a987 100644 --- a/bin/varnishd/cache/cache_req.c +++ b/bin/varnishd/cache/cache_req.c @@ -100,7 +100,7 @@ 
Req_LogHit(struct worker *wrk, struct req *req, struct objcore *oc, } const char * -Req_LogStart(struct worker *wrk, struct req *req) +Req_LogStart(const struct worker *wrk, struct req *req) { const char *ci, *cp, *endpname; diff --git a/bin/varnishd/cache/cache_varnishd.h b/bin/varnishd/cache/cache_varnishd.h index cf397dd81..2e335b97c 100644 --- a/bin/varnishd/cache/cache_varnishd.h +++ b/bin/varnishd/cache/cache_varnishd.h @@ -404,7 +404,7 @@ void Req_Cleanup(struct sess *sp, struct worker *wrk, struct req *req); void Req_Fail(struct req *req, stream_close_t reason); void Req_AcctLogCharge(struct VSC_main_wrk *, struct req *); void Req_LogHit(struct worker *, struct req *, struct objcore *, intmax_t); -const char *Req_LogStart(struct worker *, struct req *); +const char *Req_LogStart(const struct worker *, struct req *); /* cache_req_body.c */ int VRB_Ignore(struct req *); From phk at FreeBSD.org Mon Dec 5 15:41:08 2022 From: phk at FreeBSD.org (Poul-Henning Kamp) Date: Mon, 5 Dec 2022 15:41:08 +0000 (UTC) Subject: [master] 4fa7f784f Update vtree.h from FreeBSD's sys/sys/tree.h (their: c2ddf2edb4aa4) Message-ID: <20221205154108.0CFCE90B3@lists.varnish-cache.org> commit 4fa7f784f26c784d983e5b6b1c1dcba1f233fbc7 Author: Poul-Henning Kamp Date: Mon Dec 5 15:40:11 2022 +0000 Update vtree.h from FreeBSD's sys/sys/tree.h (their: c2ddf2edb4aa4) diff --git a/bin/varnishd/cache/cache_vrt_priv.c b/bin/varnishd/cache/cache_vrt_priv.c index fd4e84fbc..5ebce43b4 100644 --- a/bin/varnishd/cache/cache_vrt_priv.c +++ b/bin/varnishd/cache/cache_vrt_priv.c @@ -55,6 +55,7 @@ static inline int vrt_priv_dyncmp(const struct vrt_priv *, VRBT_GENERATE_INSERT_COLOR(vrt_privs, vrt_priv, entry, static) VRBT_GENERATE_FIND(vrt_privs, vrt_priv, entry, vrt_priv_dyncmp, static) +VRBT_GENERATE_INSERT_FINISH(vrt_privs, vrt_priv, entry, static) VRBT_GENERATE_INSERT(vrt_privs, vrt_priv, entry, vrt_priv_dyncmp, static) VRBT_GENERATE_MINMAX(vrt_privs, vrt_priv, entry, static) VRBT_GENERATE_NEXT(vrt_privs, vrt_priv, entry, static) diff --git a/bin/varnishtop/varnishtop.c b/bin/varnishtop/varnishtop.c index b8d600877..791f143b1 100644 --- a/bin/varnishtop/varnishtop.c +++ b/bin/varnishtop/varnishtop.c @@ -111,6 +111,7 @@ cmp_order(const struct top *a, const struct top *b) } VRBT_GENERATE_INSERT_COLOR(t_order, top, e_order, static) +VRBT_GENERATE_INSERT_FINISH(t_order, top, e_order, static) VRBT_GENERATE_INSERT(t_order, top, e_order, cmp_order, static) VRBT_GENERATE_REMOVE_COLOR(t_order, top, e_order, static) VRBT_GENERATE_MINMAX(t_order, top, e_order, static) @@ -119,6 +120,7 @@ VRBT_GENERATE_REMOVE(t_order, top, e_order, static) VRBT_GENERATE_INSERT_COLOR(t_key, top, e_key, static) VRBT_GENERATE_REMOVE_COLOR(t_key, top, e_key, static) +VRBT_GENERATE_INSERT_FINISH(t_key, top, e_key, static) VRBT_GENERATE_INSERT(t_key, top, e_key, cmp_key, static) VRBT_GENERATE_REMOVE(t_key, top, e_key, static) VRBT_GENERATE_FIND(t_key, top, e_key, cmp_key, static) diff --git a/flint.lnt b/flint.lnt index af5a93da6..1f5598ded 100644 --- a/flint.lnt +++ b/flint.lnt @@ -216,6 +216,12 @@ -emacro(527, VRBT_*) // unreachable code -emacro(740, VRBT_*) // unusual pointer cast -emacro(438, VRBT_*) // last value assigned not used +-emacro(613, VRBT_*) // Possible use of null pointer 'child' in left argument to +-emacro(838, VRBT_*) // Previously assigned value to variable 'child' has not been used +-emacro(50, VRBT_GENERATE_*) // Attempted to take the address of a non-lvalue +-emacro(506, VRBT_GENERATE_*) // Constant value Boolean +-emacro(845, 
VRBT_GENERATE_*) // The left argument to operator '&&' is certain to be 0 +-emacro(774, VRBT_GENERATE_*) // Boolean within 'if' always evaluates to False -esym(534, *_VRBT_REMOVE) // ignore retval -esym(534, *_VRBT_INSERT) // ignore retval diff --git a/include/vtree.h b/include/vtree.h index 325a2cb1f..bfa5efcd7 100644 --- a/include/vtree.h +++ b/include/vtree.h @@ -59,9 +59,11 @@ * the same, and defines the rank of that node. The rank of the null node * is -1. * - * Different additional conditions define different sorts of balanced - * trees, including "red-black" and "AVL" trees. The set of conditions - * applied here are the "weak-AVL" conditions of Haeupler, Sen and Tarjan: + * Different additional conditions define different sorts of balanced trees, + * including "red-black" and "AVL" trees. The set of conditions applied here + * are the "weak-AVL" conditions of Haeupler, Sen and Tarjan presented in in + * "Rank Balanced Trees", ACM Transactions on Algorithms Volume 11 Issue 4 June + * 2015 Article No.: 30pp 1?26 https://doi.org/10.1145/2689412 (the HST paper): * - every rank-difference is 1 or 2. * - the rank of any leaf is 1. * @@ -179,7 +181,7 @@ name##_VSPLAY_INSERT(struct name *head, struct type *elm) \ if (VSPLAY_EMPTY(head)) { \ VSPLAY_LEFT(elm, field) = VSPLAY_RIGHT(elm, field) = NULL; \ } else { \ - int __comp; \ + __typeof(cmp(NULL, NULL)) __comp; \ name##_VSPLAY(head, elm); \ __comp = (cmp)(elm, (head)->sph_root); \ if (__comp < 0) { \ @@ -222,7 +224,7 @@ void \ name##_VSPLAY(struct name *head, struct type *elm) \ { \ struct type __node, *__left, *__right, *__tmp; \ - int __comp; \ + __typeof(cmp(NULL, NULL)) __comp; \ \ VSPLAY_LEFT(&__node, field) = VSPLAY_RIGHT(&__node, field) = NULL;\ __left = __right = &__node; \ @@ -321,14 +323,9 @@ struct name { \ #define VRBT_ENTRY(type) \ struct { \ - struct type *rbe_left; /* left element */ \ - struct type *rbe_right; /* right element */ \ - struct type *rbe_parent; /* parent element */ \ + struct type *rbe_link[3]; \ } -#define VRBT_LEFT(elm, field) (elm)->field.rbe_left -#define VRBT_RIGHT(elm, field) (elm)->field.rbe_right - /* * With the expectation that any object of struct type has an * address that is a multiple of 4, and that therefore the @@ -336,63 +333,78 @@ struct { \ * always zero, this implementation sets those bits to indicate * that the left or right child of the tree node is "red". 
*/ -#define VRBT_UP(elm, field) (elm)->field.rbe_parent -#define VRBT_BITS(elm, field) (*(uintptr_t *)&VRBT_UP(elm, field)) -#define VRBT_RED_L ((uintptr_t)1) -#define VRBT_RED_R ((uintptr_t)2) -#define VRBT_RED_MASK ((uintptr_t)3) -#define VRBT_FLIP_LEFT(elm, field) (VRBT_BITS(elm, field) ^= VRBT_RED_L) -#define VRBT_FLIP_RIGHT(elm, field) (VRBT_BITS(elm, field) ^= VRBT_RED_R) -#define VRBT_RED_LEFT(elm, field) ((VRBT_BITS(elm, field) & VRBT_RED_L) != 0) -#define VRBT_RED_RIGHT(elm, field) ((VRBT_BITS(elm, field) & VRBT_RED_R) != 0) -#define VRBT_PARENT(elm, field) ((__typeof(VRBT_UP(elm, field))) \ - (VRBT_BITS(elm, field) & ~VRBT_RED_MASK)) +#define _VRBT_LINK(elm, dir, field) (elm)->field.rbe_link[dir] +#define _VRBT_UP(elm, field) _VRBT_LINK(elm, 0, field) +#define _VRBT_L ((uintptr_t)1) +#define _VRBT_R ((uintptr_t)2) +#define _VRBT_LR ((uintptr_t)3) +#define _VRBT_BITS(elm) (*(uintptr_t *)&elm) +#define _VRBT_BITSUP(elm, field) _VRBT_BITS(_VRBT_UP(elm, field)) +#define _VRBT_PTR(elm) (__typeof(elm)) \ + ((uintptr_t)elm & ~_VRBT_LR) + +#define VRBT_PARENT(elm, field) _VRBT_PTR(_VRBT_UP(elm, field)) +#define VRBT_LEFT(elm, field) _VRBT_LINK(elm, _VRBT_L, field) +#define VRBT_RIGHT(elm, field) _VRBT_LINK(elm, _VRBT_R, field) #define VRBT_ROOT(head) (head)->rbh_root #define VRBT_EMPTY(head) (VRBT_ROOT(head) == NULL) #define VRBT_SET_PARENT(dst, src, field) do { \ - VRBT_BITS(dst, field) &= VRBT_RED_MASK; \ - VRBT_BITS(dst, field) |= (uintptr_t)src; \ + _VRBT_BITSUP(dst, field) = (uintptr_t)src | \ + (_VRBT_BITSUP(dst, field) & _VRBT_LR); \ } while (/*CONSTCOND*/ 0) #define VRBT_SET(elm, parent, field) do { \ - VRBT_UP(elm, field) = parent; \ + _VRBT_UP(elm, field) = parent; \ VRBT_LEFT(elm, field) = VRBT_RIGHT(elm, field) = NULL; \ } while (/*CONSTCOND*/ 0) -#define VRBT_COLOR(elm, field) (VRBT_PARENT(elm, field) == NULL ? 0 : \ - VRBT_LEFT(VRBT_PARENT(elm, field), field) == elm ? \ - VRBT_RED_LEFT(VRBT_PARENT(elm, field), field) : \ - VRBT_RED_RIGHT(VRBT_PARENT(elm, field), field)) +/* + * Either VRBT_AUGMENT or VRBT_AUGMENT_CHECK is invoked in a loop at the root of + * every modified subtree, from the bottom up to the root, to update augmented + * node data. VRBT_AUGMENT_CHECK returns true only when the update changes the + * node data, so that updating can be stopped short of the root when it returns + * false. 
+ */ +#ifndef VRBT_AUGMENT_CHECK +#ifndef VRBT_AUGMENT +#define VRBT_AUGMENT_CHECK(x) 0 +#else +#define VRBT_AUGMENT_CHECK(x) (VRBT_AUGMENT(x), 1) +#endif +#endif -#define VRBT_SWAP_CHILD(head, out, in, field) do { \ - if (VRBT_PARENT(out, field) == NULL) \ +#define VRBT_UPDATE_AUGMENT(elm, field) do { \ + __typeof(elm) rb_update_tmp = (elm); \ + while (VRBT_AUGMENT_CHECK(rb_update_tmp) && \ + (rb_update_tmp = VRBT_PARENT(rb_update_tmp, field)) != NULL) \ + ; \ +} while (0) + +#define VRBT_SWAP_CHILD(head, par, out, in, field) do { \ + if (par == NULL) \ VRBT_ROOT(head) = (in); \ - else if ((out) == VRBT_LEFT(VRBT_PARENT(out, field), field)) \ - VRBT_LEFT(VRBT_PARENT(out, field), field) = (in); \ + else if ((out) == VRBT_LEFT(par, field)) \ + VRBT_LEFT(par, field) = (in); \ else \ - VRBT_RIGHT(VRBT_PARENT(out, field), field) = (in); \ + VRBT_RIGHT(par, field) = (in); \ } while (/*CONSTCOND*/ 0) -#define VRBT_ROTATE_LEFT(head, elm, tmp, field) do { \ - (tmp) = VRBT_RIGHT(elm, field); \ - if ((VRBT_RIGHT(elm, field) = VRBT_LEFT(tmp, field)) != NULL) { \ - VRBT_SET_PARENT(VRBT_RIGHT(elm, field), elm, field); \ - } \ - VRBT_SET_PARENT(tmp, VRBT_PARENT(elm, field), field); \ - VRBT_SWAP_CHILD(head, elm, tmp, field); \ - VRBT_LEFT(tmp, field) = (elm); \ - VRBT_SET_PARENT(elm, tmp, field); \ -} while (/*CONSTCOND*/ 0) - -#define VRBT_ROTATE_RIGHT(head, elm, tmp, field) do { \ - (tmp) = VRBT_LEFT(elm, field); \ - if ((VRBT_LEFT(elm, field) = VRBT_RIGHT(tmp, field)) != NULL) { \ - VRBT_SET_PARENT(VRBT_LEFT(elm, field), elm, field); \ - } \ - VRBT_SET_PARENT(tmp, VRBT_PARENT(elm, field), field); \ - VRBT_SWAP_CHILD(head, elm, tmp, field); \ - VRBT_RIGHT(tmp, field) = (elm); \ +/* + * VRBT_ROTATE macro partially restructures the tree to improve balance. In the + * case when dir is _VRBT_L, tmp is a right child of elm. After rotation, elm + * is a left child of tmp, and the subtree that represented the items between + * them, which formerly hung to the left of tmp now hangs to the right of elm. + * The parent-child relationship between elm and its former parent is not + * changed; where this macro once updated those fields, that is now left to the + * caller of VRBT_ROTATE to clean up, so that a pair of rotations does not twice + * update the same pair of pointer fields with distinct values. 
+ */ +#define VRBT_ROTATE(elm, tmp, dir, field) do { \ + if ((_VRBT_LINK(elm, dir ^ _VRBT_LR, field) = \ + _VRBT_LINK(tmp, dir, field)) != NULL) \ + VRBT_SET_PARENT(_VRBT_LINK(tmp, dir, field), elm, field); \ + _VRBT_LINK(tmp, dir, field) = (elm); \ VRBT_SET_PARENT(elm, tmp, field); \ } while (/*CONSTCOND*/ 0) @@ -402,35 +414,55 @@ struct { \ #define VRBT_PROTOTYPE_STATIC(name, type, field, cmp) \ VRBT_PROTOTYPE_INTERNAL(name, type, field, cmp, v_unused_ static) #define VRBT_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \ + VRBT_PROTOTYPE_RANK(name, type, attr) \ VRBT_PROTOTYPE_INSERT_COLOR(name, type, attr); \ VRBT_PROTOTYPE_REMOVE_COLOR(name, type, attr); \ + VRBT_PROTOTYPE_INSERT_FINISH(name, type, attr); \ VRBT_PROTOTYPE_INSERT(name, type, attr); \ VRBT_PROTOTYPE_REMOVE(name, type, attr); \ VRBT_PROTOTYPE_FIND(name, type, attr); \ VRBT_PROTOTYPE_NFIND(name, type, attr); \ VRBT_PROTOTYPE_NEXT(name, type, attr); \ + VRBT_PROTOTYPE_INSERT_NEXT(name, type, attr); \ VRBT_PROTOTYPE_PREV(name, type, attr); \ + VRBT_PROTOTYPE_INSERT_PREV(name, type, attr); \ VRBT_PROTOTYPE_MINMAX(name, type, attr); \ VRBT_PROTOTYPE_REINSERT(name, type, attr); +#ifdef _VRBT_DIAGNOSTIC +#define VRBT_PROTOTYPE_RANK(name, type, attr) \ + attr int name##_VRBT_RANK(struct type *); +#else +#define VRBT_PROTOTYPE_RANK(name, type, attr) +#endif #define VRBT_PROTOTYPE_INSERT_COLOR(name, type, attr) \ - attr void name##_VRBT_INSERT_COLOR(struct name *, struct type *) + attr struct type *name##_VRBT_INSERT_COLOR(struct name *, \ + struct type *, struct type *) #define VRBT_PROTOTYPE_REMOVE_COLOR(name, type, attr) \ - attr void name##_VRBT_REMOVE_COLOR(struct name *, \ + attr struct type *name##_VRBT_REMOVE_COLOR(struct name *, \ struct type *, struct type *) #define VRBT_PROTOTYPE_REMOVE(name, type, attr) \ attr struct type *name##_VRBT_REMOVE(struct name *, struct type *) +#define VRBT_PROTOTYPE_INSERT_FINISH(name, type, attr) \ + attr struct type *name##_VRBT_INSERT_FINISH(struct name *, \ + struct type *, struct type **, struct type *) #define VRBT_PROTOTYPE_INSERT(name, type, attr) \ attr struct type *name##_VRBT_INSERT(struct name *, struct type *) #define VRBT_PROTOTYPE_FIND(name, type, attr) \ - attr struct type *name##_VRBT_FIND(const struct name *, const struct type *) + attr const struct type *name##_VRBT_FIND(const struct name *, const struct type *) #define VRBT_PROTOTYPE_NFIND(name, type, attr) \ - attr struct type *name##_VRBT_NFIND(const struct name *, const struct type *) + attr struct type *name##_VRBT_NFIND(struct name *, struct type *) #define VRBT_PROTOTYPE_NEXT(name, type, attr) \ attr struct type *name##_VRBT_NEXT(struct type *) +#define VRBT_PROTOTYPE_INSERT_NEXT(name, type, attr) \ + attr struct type *name##_VRBT_INSERT_NEXT(struct name *, \ + struct type *, struct type *) #define VRBT_PROTOTYPE_PREV(name, type, attr) \ attr struct type *name##_VRBT_PREV(struct type *) +#define VRBT_PROTOTYPE_INSERT_PREV(name, type, attr) \ + attr struct type *name##_VRBT_INSERT_PREV(struct name *, \ + struct type *, struct type *) #define VRBT_PROTOTYPE_MINMAX(name, type, attr) \ - attr struct type *name##_VRBT_MINMAX(const struct name *, int) + attr const struct type *name##_VRBT_MINMAX(struct name *, int) #define VRBT_PROTOTYPE_REINSERT(name, type, attr) \ attr struct type *name##_VRBT_REINSERT(struct name *, struct type *) @@ -442,186 +474,371 @@ struct { \ #define VRBT_GENERATE_STATIC(name, type, field, cmp) \ VRBT_GENERATE_INTERNAL(name, type, field, cmp, v_unused_ static) #define 
VRBT_GENERATE_INTERNAL(name, type, field, cmp, attr) \ + VRBT_GENERATE_RANK(name, type, field, attr) \ VRBT_GENERATE_INSERT_COLOR(name, type, field, attr) \ VRBT_GENERATE_REMOVE_COLOR(name, type, field, attr) \ + VRBT_GENERATE_INSERT_FINISH(name, type, field, attr) \ VRBT_GENERATE_INSERT(name, type, field, cmp, attr) \ VRBT_GENERATE_REMOVE(name, type, field, attr) \ VRBT_GENERATE_FIND(name, type, field, cmp, attr) \ VRBT_GENERATE_NFIND(name, type, field, cmp, attr) \ VRBT_GENERATE_NEXT(name, type, field, attr) \ + VRBT_GENERATE_INSERT_NEXT(name, type, field, cmp, attr) \ VRBT_GENERATE_PREV(name, type, field, attr) \ + VRBT_GENERATE_INSERT_PREV(name, type, field, cmp, attr) \ VRBT_GENERATE_MINMAX(name, type, field, attr) \ VRBT_GENERATE_REINSERT(name, type, field, cmp, attr) +#ifdef _VRBT_DIAGNOSTIC +#ifndef VRBT_AUGMENT +#define _VRBT_AUGMENT_VERIFY(x) VRBT_AUGMENT_CHECK(x) +#else +#define _VRBT_AUGMENT_VERIFY(x) 0 +#endif +#define VRBT_GENERATE_RANK(name, type, field, attr) \ +/* \ + * Return the rank of the subtree rooted at elm, or -1 if the subtree \ + * is not rank-balanced, or has inconsistent augmentation data. + */ \ +attr int \ +name##_VRBT_RANK(struct type *elm) \ +{ \ + struct type *left, *right, *up; \ + int left_rank, right_rank; \ + \ + if (elm == NULL) \ + return (0); \ + up = _VRBT_UP(elm, field); \ + left = VRBT_LEFT(elm, field); \ + left_rank = ((_VRBT_BITS(up) & _VRBT_L) ? 2 : 1) + \ + name##_VRBT_RANK(left); \ + right = VRBT_RIGHT(elm, field); \ + right_rank = ((_VRBT_BITS(up) & _VRBT_R) ? 2 : 1) + \ + name##_VRBT_RANK(right); \ + if (left_rank != right_rank || \ + (left_rank == 2 && left == NULL && right == NULL) || \ + _VRBT_AUGMENT_VERIFY(elm)) \ + return (-1); \ + return (left_rank); \ +} +#else +#define VRBT_GENERATE_RANK(name, type, field, attr) +#endif + #define VRBT_GENERATE_INSERT_COLOR(name, type, field, attr) \ -attr void \ -name##_VRBT_INSERT_COLOR(struct name *head, struct type *elm) \ +attr struct type * \ +name##_VRBT_INSERT_COLOR(struct name *head, \ + struct type *parent, struct type *elm) \ { \ - struct type *child, *parent; \ - while ((parent = VRBT_PARENT(elm, field)) != NULL) { \ - if (VRBT_LEFT(parent, field) == elm) { \ - if (VRBT_RED_LEFT(parent, field)) { \ - VRBT_FLIP_LEFT(parent, field); \ - return; \ - } \ - VRBT_FLIP_RIGHT(parent, field); \ - if (VRBT_RED_RIGHT(parent, field)) { \ - elm = parent; \ - continue; \ - } \ - if (!VRBT_RED_RIGHT(elm, field)) { \ - VRBT_FLIP_LEFT(elm, field); \ - VRBT_ROTATE_LEFT(head, elm, child, field);\ - if (VRBT_RED_LEFT(child, field)) \ - VRBT_FLIP_RIGHT(elm, field); \ - else if (VRBT_RED_RIGHT(child, field)) \ - VRBT_FLIP_LEFT(parent, field); \ - AN(parent); \ - elm = child; \ - } \ - VRBT_ROTATE_RIGHT(head, parent, elm, field); \ - } else { \ - if (VRBT_RED_RIGHT(parent, field)) { \ - VRBT_FLIP_RIGHT(parent, field); \ - return; \ - } \ - VRBT_FLIP_LEFT(parent, field); \ - if (VRBT_RED_LEFT(parent, field)) { \ - elm = parent; \ - continue; \ - } \ - if (!VRBT_RED_LEFT(elm, field)) { \ - VRBT_FLIP_RIGHT(elm, field); \ - VRBT_ROTATE_RIGHT(head, elm, child, field);\ - if (VRBT_RED_RIGHT(child, field)) \ - VRBT_FLIP_LEFT(elm, field); \ - else if (VRBT_RED_LEFT(child, field)) \ - VRBT_FLIP_RIGHT(parent, field); \ - elm = child; \ - } \ - VRBT_ROTATE_LEFT(head, parent, elm, field); \ + /* \ + * Initially, elm is a leaf. Either its parent was previously \ + * a leaf, with two black null children, or an interior node \ + * with a black non-null child and a red null child. 
The \ + * balance criterion "the rank of any leaf is 1" precludes the \ + * possibility of two red null children for the initial parent. \ + * So the first loop iteration cannot lead to accessing an \ + * uninitialized 'child', and a later iteration can only happen \ + * when a value has been assigned to 'child' in the previous \ + * one. \ + */ \ + struct type *child = NULL, *child_up, *gpar; \ + uintptr_t elmdir, sibdir; \ + \ + do { \ + /* the rank of the tree rooted at elm grew */ \ + gpar = _VRBT_UP(parent, field); \ + elmdir = VRBT_RIGHT(parent, field) == elm ? _VRBT_R : _VRBT_L; \ + if (_VRBT_BITS(gpar) & elmdir) { \ + /* shorten the parent-elm edge to rebalance */ \ + _VRBT_BITSUP(parent, field) ^= elmdir; \ + return (NULL); \ } \ - VRBT_BITS(elm, field) &= ~VRBT_RED_MASK; \ - break; \ - } \ + sibdir = elmdir ^ _VRBT_LR; \ + /* the other edge must change length */ \ + _VRBT_BITSUP(parent, field) ^= sibdir; \ + if ((_VRBT_BITS(gpar) & _VRBT_LR) == 0) { \ + /* both edges now short, retry from parent */ \ + child = elm; \ + elm = parent; \ + continue; \ + } \ + _VRBT_UP(parent, field) = gpar = _VRBT_PTR(gpar); \ + if (_VRBT_BITSUP(elm, field) & elmdir) { \ + /* \ + * Exactly one of the edges descending from elm \ + * is long. The long one is in the same \ + * direction as the edge from parent to elm, \ + * so change that by rotation. The edge from \ + * parent to z was shortened above. Shorten \ + * the long edge down from elm, and adjust \ + * other edge lengths based on the downward \ + * edges from 'child'. \ + * \ + * par par \ + * ? ? ? ? \ + * elm z ? z \ + * ? ? child \ + * ? child ? ? \ + * ? ? ? elm ? \ + * w ? ? ? ? y \ + * x y w ? \ + * x \ + */ \ + VRBT_ROTATE(elm, child, elmdir, field); \ + child_up = _VRBT_UP(child, field); \ + if (_VRBT_BITS(child_up) & sibdir) \ + _VRBT_BITSUP(parent, field) ^= elmdir; \ + if (_VRBT_BITS(child_up) & elmdir) \ + _VRBT_BITSUP(elm, field) ^= _VRBT_LR; \ + else \ + _VRBT_BITSUP(elm, field) ^= elmdir; \ + /* if child is a leaf, don't augment elm, \ + * since it is restored to be a leaf again. */ \ + if ((_VRBT_BITS(child_up) & _VRBT_LR) == 0) \ + elm = child; \ + } else \ + child = elm; \ + \ + /* \ + * The long edge descending from 'child' points back \ + * in the direction of 'parent'. Rotate to make \ + * 'parent' a child of 'child', then make both edges \ + * of 'child' short to rebalance. \ + * \ + * par child \ + * ? ? ? ? \ + * ? z x par \ + * child ? ? \ + * ? ? ? z \ + * x ? y \ + * y \ + */ \ + VRBT_ROTATE(parent, child, sibdir, field); \ + _VRBT_UP(child, field) = gpar; \ + VRBT_SWAP_CHILD(head, gpar, parent, child, field); \ + /* \ + * Elements rotated down have new, smaller subtrees, \ + * so update augmentation for them. \ + */ \ + if (elm != child) \ + (void)VRBT_AUGMENT_CHECK(elm); \ + (void)VRBT_AUGMENT_CHECK(parent); \ + return (child); \ + } while ((parent = gpar) != NULL); \ + return (NULL); \ } +#ifndef VRBT_STRICT_HST +/* + * In REMOVE_COLOR, the HST paper, in figure 3, in the single-rotate case, has + * 'parent' with one higher rank, and then reduces its rank if 'parent' has + * become a leaf. This implementation always has the parent in its new position + * with lower rank, to avoid the leaf check. Define VRBT_STRICT_HST to 1 to get + * the behavior that HST describes. 
+ */ +#define VRBT_STRICT_HST 0 +#endif + #define VRBT_GENERATE_REMOVE_COLOR(name, type, field, attr) \ -attr void \ +attr struct type * \ name##_VRBT_REMOVE_COLOR(struct name *head, \ struct type *parent, struct type *elm) \ { \ - struct type *sib; \ - if (VRBT_LEFT(parent, field) == elm && \ - VRBT_RIGHT(parent, field) == elm) { \ - VRBT_BITS(parent, field) &= ~VRBT_RED_MASK; \ + struct type *gpar, *sib, *up; \ + uintptr_t elmdir, sibdir; \ + \ + if (VRBT_RIGHT(parent, field) == elm && \ + VRBT_LEFT(parent, field) == elm) { \ + /* Deleting a leaf that is an only-child creates a \ + * rank-2 leaf. Demote that leaf. */ \ + _VRBT_UP(parent, field) = _VRBT_PTR(_VRBT_UP(parent, field)); \ elm = parent; \ - parent = VRBT_PARENT(elm, field); \ - if (parent == NULL) \ - return; \ + if ((parent = _VRBT_UP(elm, field)) == NULL) \ + return (NULL); \ } \ - do { \ - if (VRBT_LEFT(parent, field) == elm) { \ - if (!VRBT_RED_LEFT(parent, field)) { \ - VRBT_FLIP_LEFT(parent, field); \ - return; \ - } \ - if (VRBT_RED_RIGHT(parent, field)) { \ - VRBT_FLIP_RIGHT(parent, field); \ - elm = parent; \ - continue; \ - } \ - sib = VRBT_RIGHT(parent, field); \ - if ((~VRBT_BITS(sib, field) & VRBT_RED_MASK) == 0) {\ - VRBT_BITS(sib, field) &= ~VRBT_RED_MASK; \ - elm = parent; \ - continue; \ - } \ - VRBT_FLIP_RIGHT(sib, field); \ - if (VRBT_RED_LEFT(sib, field)) \ - VRBT_FLIP_LEFT(parent, field); \ - else if (!VRBT_RED_RIGHT(sib, field)) { \ - VRBT_FLIP_LEFT(parent, field); \ - VRBT_ROTATE_RIGHT(head, sib, elm, field); \ - if (VRBT_RED_RIGHT(elm, field)) \ - VRBT_FLIP_LEFT(sib, field); \ - if (VRBT_RED_LEFT(elm, field)) \ - VRBT_FLIP_RIGHT(parent, field); \ - VRBT_BITS(elm, field) |= VRBT_RED_MASK; \ - sib = elm; \ - } \ - VRBT_ROTATE_LEFT(head, parent, sib, field); \ + do { \ + /* the rank of the tree rooted at elm shrank */ \ + gpar = _VRBT_UP(parent, field); \ + elmdir = VRBT_RIGHT(parent, field) == elm ? _VRBT_R : _VRBT_L; \ + _VRBT_BITS(gpar) ^= elmdir; \ + if (_VRBT_BITS(gpar) & elmdir) { \ + /* lengthen the parent-elm edge to rebalance */ \ + _VRBT_UP(parent, field) = gpar; \ + return (NULL); \ + } \ + if (_VRBT_BITS(gpar) & _VRBT_LR) { \ + /* shorten other edge, retry from parent */ \ + _VRBT_BITS(gpar) ^= _VRBT_LR; \ + _VRBT_UP(parent, field) = gpar; \ + gpar = _VRBT_PTR(gpar); \ + continue; \ + } \ + sibdir = elmdir ^ _VRBT_LR; \ + sib = _VRBT_LINK(parent, sibdir, field); \ + up = _VRBT_UP(sib, field); \ + _VRBT_BITS(up) ^= _VRBT_LR; \ + if ((_VRBT_BITS(up) & _VRBT_LR) == 0) { \ + /* shorten edges descending from sib, retry */ \ + _VRBT_UP(sib, field) = up; \ + continue; \ + } \ + if ((_VRBT_BITS(up) & sibdir) == 0) { \ + /* \ + * The edge descending from 'sib' away from \ + * 'parent' is long. The short edge descending \ + * from 'sib' toward 'parent' points to 'elm*' \ + * Rotate to make 'sib' a child of 'elm*' \ + * then adjust the lengths of the edges \ + * descending from 'sib' and 'elm*'. \ + * \ + * par par \ + * ? ? ? ? \ + * ? sib elm ? \ + * ? / ? elm* \ + * elm elm* ? ? ? \ + * ? ? ? ? ? \ + * ? ? z ? ? \ + * x y x sib \ + * ? ? \ + * ? z \ + * y \ + */ \ + elm = _VRBT_LINK(sib, elmdir, field); \ + /* elm is a 1-child. First rotate at elm. */ \ + VRBT_ROTATE(sib, elm, sibdir, field); \ + up = _VRBT_UP(elm, field); \ + _VRBT_BITSUP(parent, field) ^= \ + (_VRBT_BITS(up) & elmdir) ? _VRBT_LR : elmdir; \ + _VRBT_BITSUP(sib, field) ^= \ + (_VRBT_BITS(up) & sibdir) ? 
_VRBT_LR : sibdir; \ + _VRBT_BITSUP(elm, field) |= _VRBT_LR; \ } else { \ - if (!VRBT_RED_RIGHT(parent, field)) { \ - VRBT_FLIP_RIGHT(parent, field); \ - return; \ - } \ - if (VRBT_RED_LEFT(parent, field)) { \ - VRBT_FLIP_LEFT(parent, field); \ - elm = parent; \ - continue; \ - } \ - sib = VRBT_LEFT(parent, field); \ - if ((~VRBT_BITS(sib, field) & VRBT_RED_MASK) == 0) {\ - VRBT_BITS(sib, field) &= ~VRBT_RED_MASK; \ - elm = parent; \ - continue; \ - } \ - VRBT_FLIP_LEFT(sib, field); \ - if (VRBT_RED_RIGHT(sib, field)) \ - VRBT_FLIP_RIGHT(parent, field); \ - else if (!VRBT_RED_LEFT(sib, field)) { \ - VRBT_FLIP_RIGHT(parent, field); \ - VRBT_ROTATE_LEFT(head, sib, elm, field); \ - if (VRBT_RED_LEFT(elm, field)) \ - VRBT_FLIP_RIGHT(sib, field); \ - if (VRBT_RED_RIGHT(elm, field)) \ - VRBT_FLIP_LEFT(parent, field); \ - VRBT_BITS(elm, field) |= VRBT_RED_MASK; \ - sib = elm; \ - } \ - VRBT_ROTATE_RIGHT(head, parent, sib, field); \ + if ((_VRBT_BITS(up) & elmdir) == 0 && \ + VRBT_STRICT_HST && elm != NULL) { \ + /* if parent does not become a leaf, \ + do not demote parent yet. */ \ + _VRBT_BITSUP(parent, field) ^= sibdir; \ + _VRBT_BITSUP(sib, field) ^= _VRBT_LR; \ + } else if ((_VRBT_BITS(up) & elmdir) == 0) { \ + /* demote parent. */ \ + _VRBT_BITSUP(parent, field) ^= elmdir; \ + _VRBT_BITSUP(sib, field) ^= sibdir; \ + } else \ + _VRBT_BITSUP(sib, field) ^= sibdir; \ + elm = sib; \ } \ - break; \ - } while ((parent = VRBT_PARENT(elm, field)) != NULL); \ + \ + /* \ + * The edge descending from 'elm' away from 'parent' \ + * is short. Rotate to make 'parent' a child of 'elm', \ + * then lengthen the short edges descending from \ + * 'parent' and 'elm' to rebalance. \ + * \ + * par elm \ + * ? ? ? ? \ + * e ? ? ? \ + * elm ? ? \ + * ? ? par s \ + * ? ? ? ? \ + * ? ? e ? \ + * x s x \ + */ \ + VRBT_ROTATE(parent, elm, elmdir, field); \ + VRBT_SET_PARENT(elm, gpar, field); \ + VRBT_SWAP_CHILD(head, gpar, parent, elm, field); \ + /* \ + * An element rotated down, but not into the search \ + * path has a new, smaller subtree, so update \ + * augmentation for it. \ + */ \ + if (sib != elm) \ + (void)VRBT_AUGMENT_CHECK(sib); \ + return (parent); \ + } while (elm = parent, (parent = gpar) != NULL); \ + return (NULL); \ } +#define _VRBT_AUGMENT_WALK(elm, match, field) \ +do { \ + if (match == elm) \ + match = NULL; \ +} while (VRBT_AUGMENT_CHECK(elm) && \ + (elm = VRBT_PARENT(elm, field)) != NULL) + #define VRBT_GENERATE_REMOVE(name, type, field, attr) \ attr struct type * \ -name##_VRBT_REMOVE(struct name *head, struct type *elm) \ +name##_VRBT_REMOVE(struct name *head, struct type *out) \ { \ - struct type *child, *old, *parent, *right; \ + struct type *child = NULL, *in, *opar, *parent; \ \ - old = elm; \ - parent = VRBT_PARENT(elm, field); \ - right = VRBT_RIGHT(elm, field); \ - if (VRBT_LEFT(elm, field) == NULL) \ - elm = child = right; \ - else if (right == NULL) \ - elm = child = VRBT_LEFT(elm, field); \ - else { \ - if ((child = VRBT_LEFT(right, field)) == NULL) { \ - child = VRBT_RIGHT(right, field); \ - VRBT_RIGHT(old, field) = child; \ - parent = elm = right; \ - } else { \ - do \ - elm = child; \ - while ((child = VRBT_LEFT(elm, field)) != NULL); \ - child = VRBT_RIGHT(elm, field); \ - parent = VRBT_PARENT(elm, field); \ + child = VRBT_LEFT(out, field); \ + in = VRBT_RIGHT(out, field); \ + opar = _VRBT_UP(out, field); \ + if (in == NULL || child == NULL) { \ + in = child = (in == NULL ? 
child : in); \ + parent = opar = _VRBT_PTR(opar); \ + } else { \ + parent = in; \ + while (VRBT_LEFT(in, field)) \ + in = VRBT_LEFT(in, field); \ + VRBT_SET_PARENT(child, in, field); \ + VRBT_LEFT(in, field) = child; \ + child = VRBT_RIGHT(in, field); \ + if (parent != in) { \ + VRBT_SET_PARENT(parent, in, field); \ + VRBT_RIGHT(in, field) = parent; \ + parent = VRBT_PARENT(in, field); \ VRBT_LEFT(parent, field) = child; \ - VRBT_SET_PARENT(VRBT_RIGHT(old, field), elm, field);\ } \ - VRBT_SET_PARENT(VRBT_LEFT(old, field), elm, field); \ - elm->field = old->field; \ + _VRBT_UP(in, field) = opar; \ + opar = _VRBT_PTR(opar); \ } \ - VRBT_SWAP_CHILD(head, old, elm, field); \ + VRBT_SWAP_CHILD(head, opar, out, in, field); \ if (child != NULL) \ - VRBT_SET_PARENT(child, parent, field); \ + _VRBT_UP(child, field) = parent; \ + if (parent != NULL) { \ + opar = name##_VRBT_REMOVE_COLOR(head, parent, child); \ + /* if rotation has made 'parent' the root of the same \ + * subtree as before, don't re-augment it. */ \ + if (parent == in && VRBT_LEFT(parent, field) == NULL) { \ + opar = NULL; \ + parent = VRBT_PARENT(parent, field); \ + } \ + _VRBT_AUGMENT_WALK(parent, opar, field); \ + if (opar != NULL) { \ + /* \ + * Elements rotated into the search path have \ + * changed subtrees, so update augmentation for \ + * them if AUGMENT_WALK didn't. \ + */ \ + (void)VRBT_AUGMENT_CHECK(opar); \ + (void)VRBT_AUGMENT_CHECK(VRBT_PARENT(opar, field)); \ + } \ + } \ + return (out); \ +} + +#define VRBT_GENERATE_INSERT_FINISH(name, type, field, attr) \ +/* Inserts a node into the RB tree */ \ +attr struct type * \ +name##_VRBT_INSERT_FINISH(struct name *head, struct type *parent, \ + struct type **pptr, struct type *elm) \ +{ \ + struct type *tmp = NULL; \ + \ + VRBT_SET(elm, parent, field); \ + *pptr = elm; \ if (parent != NULL) \ - name##_VRBT_REMOVE_COLOR(head, parent, child); \ - return (old); \ + tmp = name##_VRBT_INSERT_COLOR(head, parent, elm); \ + _VRBT_AUGMENT_WALK(elm, tmp, field); \ + if (tmp != NULL) \ + /* \ + * An element rotated into the search path has a \ + * changed subtree, so update augmentation for it if \ + * AUGMENT_WALK didn't. 
\ + */ \ + (void)VRBT_AUGMENT_CHECK(tmp); \ + return (NULL); \ } #define VRBT_GENERATE_INSERT(name, type, field, cmp, attr) \ @@ -630,28 +847,20 @@ attr struct type * \ name##_VRBT_INSERT(struct name *head, struct type *elm) \ { \ struct type *tmp; \ + struct type **tmpp = &VRBT_ROOT(head); \ struct type *parent = NULL; \ - int comp = 0; \ - tmp = VRBT_ROOT(head); \ - while (tmp) { \ + \ + while ((tmp = *tmpp) != NULL) { \ parent = tmp; \ - comp = (cmp)(elm, parent); \ + __typeof(cmp(NULL, NULL)) comp = (cmp)(elm, parent); \ if (comp < 0) \ - tmp = VRBT_LEFT(tmp, field); \ + tmpp = &VRBT_LEFT(parent, field); \ else if (comp > 0) \ - tmp = VRBT_RIGHT(tmp, field); \ + tmpp = &VRBT_RIGHT(parent, field); \ else \ - return (tmp); \ + return (parent); \ } \ - VRBT_SET(elm, parent, field); \ - if (parent == NULL) \ - VRBT_ROOT(head) = elm; \ - else if (comp < 0) \ - VRBT_LEFT(parent, field) = elm; \ - else \ - VRBT_RIGHT(parent, field) = elm; \ - name##_VRBT_INSERT_COLOR(head, elm); \ - return (NULL); \ + return (name##_VRBT_INSERT_FINISH(head, parent, tmpp, elm)); \ } #define VRBT_GENERATE_FIND(name, type, field, cmp, attr) \ @@ -660,7 +869,7 @@ attr struct type * \ name##_VRBT_FIND(const struct name *head, const struct type *elm) \ { \ struct type *tmp = VRBT_ROOT(head); \ - int comp; \ + __typeof(cmp(NULL, NULL)) comp; \ while (tmp) { \ comp = cmp(elm, tmp); \ if (comp < 0) \ @@ -676,11 +885,11 @@ name##_VRBT_FIND(const struct name *head, const struct type *elm) \ #define VRBT_GENERATE_NFIND(name, type, field, cmp, attr) \ /* Finds the first node greater than or equal to the search key */ \ attr struct type * \ -name##_VRBT_NFIND(const struct name *head, const struct type *elm) \ +name##_VRBT_NFIND(struct name *head, struct type *elm) \ { \ struct type *tmp = VRBT_ROOT(head); \ struct type *res = NULL; \ - int comp; \ + __typeof(cmp(NULL, NULL)) comp; \ while (tmp) { \ comp = cmp(elm, tmp); \ if (comp < 0) { \ @@ -705,19 +914,41 @@ name##_VRBT_NEXT(struct type *elm) \ while (VRBT_LEFT(elm, field)) \ elm = VRBT_LEFT(elm, field); \ } else { \ - if (VRBT_PARENT(elm, field) && \ - (elm == VRBT_LEFT(VRBT_PARENT(elm, field), field))) \ - elm = VRBT_PARENT(elm, field); \ - else { \ - while (VRBT_PARENT(elm, field) && \ - (elm == VRBT_RIGHT(VRBT_PARENT(elm, field), field)))\ - elm = VRBT_PARENT(elm, field); \ + while (VRBT_PARENT(elm, field) && \ + (elm == VRBT_RIGHT(VRBT_PARENT(elm, field), field))) \ elm = VRBT_PARENT(elm, field); \ - } \ + elm = VRBT_PARENT(elm, field); \ } \ return (elm); \ } +#if defined(_KERNEL) && defined(DIAGNOSTIC) +#define _VRBT_ORDER_CHECK(cmp, lo, hi) do { \ + KASSERT((cmp)(lo, hi) < 0, ("out of order insertion")); \ +} while (0) +#else +#define _VRBT_ORDER_CHECK(cmp, lo, hi) do {} while (0) +#endif + +#define VRBT_GENERATE_INSERT_NEXT(name, type, field, cmp, attr) \ +/* Inserts a node into the next position in the RB tree */ \ +attr struct type * \ +name##_VRBT_INSERT_NEXT(struct name *head, \ + struct type *elm, struct type *next) \ +{ \ + struct type *tmp; \ + struct type **tmpp = &VRBT_RIGHT(elm, field); \ + \ + _VRBT_ORDER_CHECK(cmp, elm, next); \ + if (name##_VRBT_NEXT(elm) != NULL) \ + _VRBT_ORDER_CHECK(cmp, next, name##_VRBT_NEXT(elm)); \ + while ((tmp = *tmpp) != NULL) { \ + elm = tmp; \ + tmpp = &VRBT_LEFT(elm, field); \ + } \ + return (name##_VRBT_INSERT_FINISH(head, elm, tmpp, next)); \ +} + #define VRBT_GENERATE_PREV(name, type, field, attr) \ /* ARGSUSED */ \ attr struct type * \ @@ -728,19 +959,33 @@ name##_VRBT_PREV(struct type *elm) \ while 
(VRBT_RIGHT(elm, field)) \ elm = VRBT_RIGHT(elm, field); \ } else { \ - if (VRBT_PARENT(elm, field) && \ - (elm == VRBT_RIGHT(VRBT_PARENT(elm, field), field))) \ - elm = VRBT_PARENT(elm, field); \ - else { \ - while (VRBT_PARENT(elm, field) && \ - (elm == VRBT_LEFT(VRBT_PARENT(elm, field), field)))\ - elm = VRBT_PARENT(elm, field); \ + while (VRBT_PARENT(elm, field) && \ + (elm == VRBT_LEFT(VRBT_PARENT(elm, field), field))) \ elm = VRBT_PARENT(elm, field); \ - } \ + elm = VRBT_PARENT(elm, field); \ } \ return (elm); \ } +#define VRBT_GENERATE_INSERT_PREV(name, type, field, cmp, attr) \ +/* Inserts a node into the prev position in the RB tree */ \ +attr struct type * \ +name##_VRBT_INSERT_PREV(struct name *head, \ + struct type *elm, struct type *prev) \ +{ \ + struct type *tmp; \ + struct type **tmpp = &VRBT_LEFT(elm, field); \ + \ + _VRBT_ORDER_CHECK(cmp, prev, elm); \ + if (name##_VRBT_PREV(elm) != NULL) \ + _VRBT_ORDER_CHECK(cmp, name##_VRBT_PREV(elm), prev); \ + while ((tmp = *tmpp) != NULL) { \ + elm = tmp; \ + tmpp = &VRBT_RIGHT(elm, field); \ + } \ + return (name##_VRBT_INSERT_FINISH(head, elm, tmpp, prev)); \ +} + #define VRBT_GENERATE_MINMAX(name, type, field, attr) \ attr struct type * \ name##_VRBT_MINMAX(const struct name *head, int val) \ @@ -777,6 +1022,8 @@ name##_VRBT_REINSERT(struct name *head, struct type *elm) \ #define VRBT_INF 1 #define VRBT_INSERT(name, x, y) name##_VRBT_INSERT(x, y) +#define VRBT_INSERT_NEXT(name, x, y, z) name##_VRBT_INSERT_NEXT(x, y, z) +#define VRBT_INSERT_PREV(name, x, y, z) name##_VRBT_INSERT_PREV(x, y, z) #define VRBT_REMOVE(name, x, y) name##_VRBT_REMOVE(x, y) #define VRBT_FIND(name, x, y) name##_VRBT_FIND(x, y) #define VRBT_NFIND(name, x, y) name##_VRBT_NFIND(x, y) diff --git a/lib/libvarnishapi/vsl_dispatch.c b/lib/libvarnishapi/vsl_dispatch.c index cf0683606..361407f1b 100644 --- a/lib/libvarnishapi/vsl_dispatch.c +++ b/lib/libvarnishapi/vsl_dispatch.c @@ -232,6 +232,7 @@ vtx_keycmp(const struct vtx_key *a, const struct vtx_key *b) VRBT_GENERATE_REMOVE_COLOR(vtx_tree, vtx_key, entry, static) VRBT_GENERATE_REMOVE(vtx_tree, vtx_key, entry, static) VRBT_GENERATE_INSERT_COLOR(vtx_tree, vtx_key, entry, static) +VRBT_GENERATE_INSERT_FINISH(vtx_tree, vtx_key, entry, static) VRBT_GENERATE_INSERT(vtx_tree, vtx_key, entry, vtx_keycmp, static) VRBT_GENERATE_FIND(vtx_tree, vtx_key, entry, vtx_keycmp, static) diff --git a/lib/libvcc/vcc_acl.c b/lib/libvcc/vcc_acl.c index 811f0bf3b..818693670 100644 --- a/lib/libvcc/vcc_acl.c +++ b/lib/libvcc/vcc_acl.c @@ -150,6 +150,7 @@ vcl_acl_disjoint(const struct acl_e *ae1, const struct acl_e *ae2) } VRBT_GENERATE_INSERT_COLOR(acl_tree, acl_e, branch, static) +VRBT_GENERATE_INSERT_FINISH(acl_tree, acl_e, branch, static) VRBT_GENERATE_INSERT(acl_tree, acl_e, branch, vcl_acl_cmp, static) VRBT_GENERATE_MINMAX(acl_tree, acl_e, branch, static) VRBT_GENERATE_NEXT(acl_tree, acl_e, branch, static) From martin at varnish-software.com Mon Dec 5 17:59:06 2022 From: martin at varnish-software.com (Martin Blix Grydeland) Date: Mon, 5 Dec 2022 17:59:06 +0000 (UTC) Subject: [6.0] a9a6d98a2 Use host network in docker build instance Message-ID: <20221205175906.DCEAF65C7B@lists.varnish-cache.org> commit a9a6d98a2b1b6d4e52d5e59de7ee00145c30ea1e Author: Martin Blix Grydeland Date: Mon Dec 5 10:58:05 2022 -0700 Use host network in docker build instance diff --git a/.circleci/config.yml b/.circleci/config.yml index d4a1970f4..709916811 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -234,7 +234,7 @@ jobs: command: | 
docker create --name workspace -v /workspace << parameters.prefix >><< parameters.dist >>:<< parameters.release >> /bin/true docker cp /workspace workspace:/ - docker run --volumes-from workspace -w /workspace << parameters.prefix >><< parameters.dist >>:<< parameters.release >> sh -c ' + docker run --volumes-from workspace -w /workspace --network host << parameters.prefix >><< parameters.dist >>:<< parameters.release >> sh -c ' case "<< parameters.dist >>" in centos|almalinux|fedora) yum groupinstall -y "Development Tools" From dridi.boukelmoune at gmail.com Tue Dec 6 13:15:09 2022 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Tue, 6 Dec 2022 13:15:09 +0000 (UTC) Subject: [master] 5981c35a6 mgt: Remove vext_cache only when necessary Message-ID: <20221206131509.829276169A@lists.varnish-cache.org> commit 5981c35a6e3b5b09dc169aab8ae76ed45710cef6 Author: Walid Boudebouda Date: Thu Dec 1 17:37:27 2022 +0100 mgt: Remove vext_cache only when necessary Refs #3874 diff --git a/bin/varnishd/mgt/mgt_main.c b/bin/varnishd/mgt/mgt_main.c index 019da8d02..2d77b7d63 100644 --- a/bin/varnishd/mgt/mgt_main.c +++ b/bin/varnishd/mgt/mgt_main.c @@ -286,9 +286,11 @@ mgt_Cflag_atexit(void) /* Only master process */ if (getpid() != heritage.mgt_pid) return; - vext_cleanup(1); + if (arg_list_count("E")) { + vext_cleanup(1); + VJ_rmdir("vext_cache"); + } VJ_rmdir("vmod_cache"); - VJ_rmdir("vext_cache"); (void)chdir("/"); VJ_rmdir(workdir); } From phk at FreeBSD.org Tue Dec 6 13:22:05 2022 From: phk at FreeBSD.org (Poul-Henning Kamp) Date: Tue, 6 Dec 2022 13:22:05 +0000 (UTC) Subject: [master] 04e9659f2 Change vsm->serial to uint64_t. Message-ID: <20221206132205.6468C61C74@lists.varnish-cache.org> commit 04e9659f21c4a6507bf952ea53bfe2369ef9be88 Author: Poul-Henning Kamp Date: Tue Dec 6 13:19:50 2022 +0000 Change vsm->serial to uint64_t. Neither Martin nor I can remember why this was uintptr_t in the first place, and there is no signs of pointers anywhere nearby. diff --git a/lib/libvarnishapi/vsm.c b/lib/libvarnishapi/vsm.c index af1b3b212..42d4b9337 100644 --- a/lib/libvarnishapi/vsm.c +++ b/lib/libvarnishapi/vsm.c @@ -74,13 +74,13 @@ const struct vsm_valid VSM_valid[1] = {{"valid"}}; static vlu_f vsm_vlu_func; #define VSM_PRIV_SHIFT \ - (sizeof (uintptr_t) * 4) + (sizeof (uint64_t) * 4) #define VSM_PRIV_MASK \ ((1UL << VSM_PRIV_SHIFT) - 1) #define VSM_PRIV_LOW(u) \ - ((uintptr_t)(u) & VSM_PRIV_MASK) + ((uint64_t)(u) & VSM_PRIV_MASK) #define VSM_PRIV_HIGH(u) \ - (((uintptr_t)(u) >> VSM_PRIV_SHIFT) & VSM_PRIV_MASK) + (((uint64_t)(u) >> VSM_PRIV_SHIFT) & VSM_PRIV_MASK) #define VSM_PRIV_MERGE(low, high) \ (VSM_PRIV_LOW(low) | (VSM_PRIV_LOW(high) << VSM_PRIV_SHIFT)) @@ -105,7 +105,7 @@ struct vsm_seg { size_t sz; void *b; void *e; - uintptr_t serial; + uint64_t serial; }; struct vsm_set { @@ -140,7 +140,7 @@ struct vsm { #define VSM_MAGIC 0x6e3bd69b struct vsb *diag; - uintptr_t serial; + uint64_t serial; int wdfd; struct stat wdst; From phk at FreeBSD.org Tue Dec 6 13:55:06 2022 From: phk at FreeBSD.org (Poul-Henning Kamp) Date: Tue, 6 Dec 2022 13:55:06 +0000 (UTC) Subject: [master] 648b6895f 32-bit polish for previous commit. Message-ID: <20221206135506.DDBD163FA5@lists.varnish-cache.org> commit 648b6895fb76abfeafb44b203afbd1cf4bd3ba55 Author: Poul-Henning Kamp Date: Tue Dec 6 13:54:02 2022 +0000 32-bit polish for previous commit. 
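A note on what the 32-bit polish addresses: with the serial now a uint64_t, VSM_PRIV_SHIFT evaluates to 32, and on an ILP32 target "1UL << 32" shifts an unsigned long by its full width, which is undefined behaviour; hence, presumably, the switch to 1ULL in the mask and to uint64_t for the local in vsm_findseg() in the diff below. A stand-alone sketch of the same low/high split, with made-up values rather than anything from libvarnishapi:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SHIFT ((unsigned)(sizeof (uint64_t) * 4))   /* 32 */
    #define MASK  ((1ULL << SHIFT) - 1)   /* 1UL here is undefined where long is 32 bits */

    int
    main(void)
    {
        uint64_t serial = ((uint64_t)7 << SHIFT) | 42;  /* high half 7, low half 42 */

        printf("low  %" PRIu64 "\n", (uint64_t)(serial & MASK));            /* 42 */
        printf("high %" PRIu64 "\n", (uint64_t)((serial >> SHIFT) & MASK)); /* 7 */
        return (0);
    }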
diff --git a/lib/libvarnishapi/vsm.c b/lib/libvarnishapi/vsm.c index 42d4b9337..5242b0726 100644 --- a/lib/libvarnishapi/vsm.c +++ b/lib/libvarnishapi/vsm.c @@ -76,7 +76,7 @@ static vlu_f vsm_vlu_func; #define VSM_PRIV_SHIFT \ (sizeof (uint64_t) * 4) #define VSM_PRIV_MASK \ - ((1UL << VSM_PRIV_SHIFT) - 1) + ((1ULL << VSM_PRIV_SHIFT) - 1) #define VSM_PRIV_LOW(u) \ ((uint64_t)(u) & VSM_PRIV_MASK) #define VSM_PRIV_HIGH(u) \ @@ -792,7 +792,7 @@ vsm_findseg(const struct vsm *vd, const struct vsm_fantom *vf) { struct vsm_set *vs; struct vsm_seg *vg; - uintptr_t x; + uint64_t x; x = VSM_PRIV_HIGH(vf->priv); if (x == vd->serial) { From phk at FreeBSD.org Tue Dec 6 16:27:06 2022 From: phk at FreeBSD.org (Poul-Henning Kamp) Date: Tue, 6 Dec 2022 16:27:06 +0000 (UTC) Subject: [master] 564d60d6a Change vfp_entry->priv2 to int64_t instead of intptr_t Message-ID: <20221206162706.573AB940F2@lists.varnish-cache.org> commit 564d60d6ae794c605013be5cde5a85b9593493d7 Author: Poul-Henning Kamp Date: Tue Dec 6 16:25:36 2022 +0000 Change vfp_entry->priv2 to int64_t instead of intptr_t diff --git a/bin/varnishd/cache/cache_filter.h b/bin/varnishd/cache/cache_filter.h index 13fa35bcd..eb9a8f325 100644 --- a/bin/varnishd/cache/cache_filter.h +++ b/bin/varnishd/cache/cache_filter.h @@ -63,7 +63,7 @@ struct vfp_entry { enum vfp_status closed; const struct vfp *vfp; void *priv1; - intptr_t priv2; + int64_t priv2; VTAILQ_ENTRY(vfp_entry) list; uint64_t calls; uint64_t bytes_out; From phk at FreeBSD.org Tue Dec 6 18:24:07 2022 From: phk at FreeBSD.org (Poul-Henning Kamp) Date: Tue, 6 Dec 2022 18:24:07 +0000 (UTC) Subject: [master] 2f442d974 Ssize_t is a better choice than int64_t Message-ID: <20221206182407.3DCE39BE81@lists.varnish-cache.org> commit 2f442d974820857c495060a96e6b7d4003ac11e1 Author: Poul-Henning Kamp Date: Tue Dec 6 18:23:16 2022 +0000 Ssize_t is a better choice than int64_t diff --git a/bin/varnishd/cache/cache_filter.h b/bin/varnishd/cache/cache_filter.h index eb9a8f325..6ec82e0f4 100644 --- a/bin/varnishd/cache/cache_filter.h +++ b/bin/varnishd/cache/cache_filter.h @@ -63,7 +63,7 @@ struct vfp_entry { enum vfp_status closed; const struct vfp *vfp; void *priv1; - int64_t priv2; + ssize_t priv2; VTAILQ_ENTRY(vfp_entry) list; uint64_t calls; uint64_t bytes_out; From dridi.boukelmoune at gmail.com Thu Dec 8 08:12:07 2022 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Thu, 8 Dec 2022 08:12:07 +0000 (UTC) Subject: [master] 5f95d3671 vtc: Increase screen width in u11 Message-ID: <20221208081207.18CDF632F2@lists.varnish-cache.org> commit 5f95d3671bd486581e8fdf2c1023ea9772ee26aa Author: Dridi Boukelmoune Date: Thu Dec 8 09:09:59 2022 +0100 vtc: Increase screen width in u11 Otherwise a long banner may throw expectations off by one line. Fixes #3877 diff --git a/bin/varnishtest/tests/u00011.vtc b/bin/varnishtest/tests/u00011.vtc index 1561ad1e4..42152f824 100644 --- a/bin/varnishtest/tests/u00011.vtc +++ b/bin/varnishtest/tests/u00011.vtc @@ -14,7 +14,7 @@ client c1 { varnish v1 -vsl_catchup -process p1 -log {varnishadm -n ${v1_name}} -start +process p1 -log -winsz 25 120 {varnishadm -n ${v1_name}} -start process p1 -expect-text 0 1 "Type 'quit' to close CLI session." 
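As an aside on the priv2 type churn just above (uintptr_t, then int64_t, then ssize_t): on the LP64 platforms Varnish is usually built for, all three are 8 bytes wide, so the changes mostly signal intent, presumably that the field carries counts and offsets rather than pointers, without altering layout; on an ILP32 build, ssize_t and intptr_t shrink to 4 bytes while int64_t stays at 8. A trivial compile-and-run check of the widths on a given target (nothing here comes from the commits themselves):

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/types.h>        /* ssize_t */

    int
    main(void)
    {
        printf("sizeof(intptr_t) = %zu\n", sizeof (intptr_t));
        printf("sizeof(ssize_t)  = %zu\n", sizeof (ssize_t));
        printf("sizeof(int64_t)  = %zu\n", sizeof (int64_t));
        return (0);
    }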
From dridi.boukelmoune at gmail.com Thu Dec 8 15:30:07 2022 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Thu, 8 Dec 2022 15:30:07 +0000 (UTC) Subject: [master] 0b8e5ed2b vtc: Typo Message-ID: <20221208153007.F1B7BA4B23@lists.varnish-cache.org> commit 0b8e5ed2b192aecc613803911bb54a868ad1a0d1 Author: Dridi Boukelmoune Date: Thu Dec 8 16:28:52 2022 +0100 vtc: Typo diff --git a/bin/varnishtest/tests/v00004.vtc b/bin/varnishtest/tests/v00004.vtc index 3e8bca71c..2635eb695 100644 --- a/bin/varnishtest/tests/v00004.vtc +++ b/bin/varnishtest/tests/v00004.vtc @@ -1,4 +1,4 @@ -varnishtest "test if our default paramers make sense ..." +varnishtest "test if our default parameters make sense ..." feature 64bit feature !sanitizer From phk at FreeBSD.org Mon Dec 12 13:40:10 2022 From: phk at FreeBSD.org (Poul-Henning Kamp) Date: Mon, 12 Dec 2022 13:40:10 +0000 (UTC) Subject: [master] 38dd0df7d Minor NO-OP polish. Message-ID: <20221212134010.44FDA11C97C@lists.varnish-cache.org> commit 38dd0df7dc0c457dc7b121143cceabdf8fbf8fcb Author: Poul-Henning Kamp Date: Mon Dec 12 13:38:34 2022 +0000 Minor NO-OP polish. diff --git a/bin/varnishd/cache/cache_http.c b/bin/varnishd/cache/cache_http.c index 75e66ce6d..0a8186861 100644 --- a/bin/varnishd/cache/cache_http.c +++ b/bin/varnishd/cache/cache_http.c @@ -389,7 +389,7 @@ http_Teardown(struct http *hp) */ void -HTTP_Dup(struct http *to, const struct http * const fm) +HTTP_Dup(struct http *to, const struct http * fm) { assert(fm->nhd <= to->shd); diff --git a/include/vtree.h b/include/vtree.h index bfa5efcd7..66bfd44cc 100644 --- a/include/vtree.h +++ b/include/vtree.h @@ -533,7 +533,7 @@ name##_VRBT_INSERT_COLOR(struct name *head, \ /* \ * Initially, elm is a leaf. Either its parent was previously \ * a leaf, with two black null children, or an interior node \ - * with a black non-null child and a red null child. The \ + * with a black non-null child and a red null child. The \ * balance criterion "the rank of any leaf is 1" precludes the \ * possibility of two red null children for the initial parent. \ * So the first loop iteration cannot lead to accessing an \ @@ -694,12 +694,12 @@ name##_VRBT_REMOVE_COLOR(struct name *head, \ * par par \ * ? ? ? ? \ * ? sib elm ? \ - * ? / ? elm* \ - * elm elm* ? ? ? \ + * ? / ? elm* \ + * elm elm* ? ? ? \ * ? ? ? ? ? \ * ? ? z ? ? \ * x y x sib \ - * ? ? \ + * ? ? \ * ? z \ * y \ */ \ diff --git a/lib/libvarnish/vcli_proto.c b/lib/libvarnish/vcli_proto.c index d7eef5d88..e0fca5cec 100644 --- a/lib/libvarnish/vcli_proto.c +++ b/lib/libvarnish/vcli_proto.c @@ -88,8 +88,7 @@ VCLI_WriteResult(int fd, unsigned status, const char *result) len = strlen(result); - i = snprintf(res, sizeof res, - "%-3d %-8zd\n", status, len); + i = snprintf(res, sizeof res, "%-3d %-8zd\n", status, len); assert(i == CLI_LINE0_LEN); assert(strtoul(res + 3, NULL, 10) == len); diff --git a/lib/libvarnish/vjsn.c b/lib/libvarnish/vjsn.c index 62163415a..fc37b4095 100644 --- a/lib/libvarnish/vjsn.c +++ b/lib/libvarnish/vjsn.c @@ -356,6 +356,10 @@ vjsn_number(struct vjsn *js) while (*js->ptr >= '0' && *js->ptr <= '9') js->ptr++; } + /* + * The terminating NUL is supplied by the caller, once they + * have decided if they like what occupies that spot. 
+ */ return (jsv); } diff --git a/lib/libvarnish/vsa.c b/lib/libvarnish/vsa.c index 90b2713e7..58477a002 100644 --- a/lib/libvarnish/vsa.c +++ b/lib/libvarnish/vsa.c @@ -255,7 +255,8 @@ VSA_Malloc(const void *s, unsigned sal) return (VSA_Build(NULL, s, sal)); } -/* 'd' SHALL point to vsa_suckaddr_len aligned bytes of storage +/* + * 'd' SHALL point to vsa_suckaddr_len aligned bytes of storage * * fam: address family * a / al : address and length @@ -466,4 +467,3 @@ VSA_free(const struct suckaddr **vsap) TAKE_OBJ_NOTNULL(vsa, vsap, SUCKADDR_MAGIC); free(TRUST_ME(vsa)); } - From phk at FreeBSD.org Mon Dec 12 14:54:06 2022 From: phk at FreeBSD.org (Poul-Henning Kamp) Date: Mon, 12 Dec 2022 14:54:06 +0000 (UTC) Subject: [master] 25b2dc31d Constify the result from VPX_tlv() Message-ID: <20221212145406.B95FE6182@lists.varnish-cache.org> commit 25b2dc31d8ec92aedb9fe49e90ccc6a0702b33ca Author: Poul-Henning Kamp Date: Mon Dec 12 14:53:07 2022 +0000 Constify the result from VPX_tlv() diff --git a/bin/varnishd/proxy/cache_proxy.h b/bin/varnishd/proxy/cache_proxy.h index 6fdfe6078..413759f94 100644 --- a/bin/varnishd/proxy/cache_proxy.h +++ b/bin/varnishd/proxy/cache_proxy.h @@ -40,6 +40,6 @@ #define PP2_SUBTYPE_SSL_KEY_ALG 0x25 #define PP2_SUBTYPE_SSL_MAX 0x25 -int VPX_tlv(const struct req *req, int tlv, void **dst, int *len); +int VPX_tlv(const struct req *req, int tlv, const void **dst, int *len); void VPX_Format_Proxy(struct vsb *, int, const struct suckaddr *, const struct suckaddr *, const char *); diff --git a/bin/varnishd/proxy/cache_proxy_proto.c b/bin/varnishd/proxy/cache_proxy_proto.c index b36ae0f6b..dc3f55a77 100644 --- a/bin/varnishd/proxy/cache_proxy_proto.c +++ b/bin/varnishd/proxy/cache_proxy_proto.c @@ -286,7 +286,7 @@ vpx_tlv_itern(struct vpx_tlv_iter *vpi) (vpi->e == NULL) && vpx_tlv_itern(itv);) int -VPX_tlv(const struct req *req, int typ, void **dst, int *len) +VPX_tlv(const struct req *req, int typ, const void **dst, int *len) { struct vpx_tlv *tlv; struct vpx_tlv_iter vpi[1], vpi2[1]; diff --git a/vmod/vmod_proxy.c b/vmod/vmod_proxy.c index 099f64a0b..7da8b2ea8 100644 --- a/vmod/vmod_proxy.c +++ b/vmod/vmod_proxy.c @@ -55,11 +55,11 @@ struct pp2_tlv_ssl { static VCL_BOOL tlv_ssl_flag(VRT_CTX, int flag) { - struct pp2_tlv_ssl *dst; + const struct pp2_tlv_ssl *dst; int len; CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); - if (VPX_tlv(ctx->req, PP2_TYPE_SSL, (void **)&dst, &len)) + if (VPX_tlv(ctx->req, PP2_TYPE_SSL, (const void **)&dst, &len)) return (0); return ((dst->client & flag) == flag); @@ -87,11 +87,11 @@ vmod_client_has_cert_conn(VRT_CTX) VCL_INT v_matchproto_(td_proxy_ssl_verify_result) vmod_ssl_verify_result(VRT_CTX) { - struct pp2_tlv_ssl *dst; + const struct pp2_tlv_ssl *dst; int len; CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); - if (VPX_tlv(ctx->req, PP2_TYPE_SSL, (void **)&dst, &len)) + if (VPX_tlv(ctx->req, PP2_TYPE_SSL, (const void **)&dst, &len)) return (0); /* X509_V_OK */ return (vbe32dec(&dst->verify)); @@ -100,19 +100,21 @@ vmod_ssl_verify_result(VRT_CTX) static VCL_STRING tlv_string(VRT_CTX, int tlv) { - char *dst, *d; + const char *ptr; + char *d; int len; CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); - if (VPX_tlv(ctx->req, tlv, (void **)&dst, &len)) + if (VPX_tlv(ctx->req, tlv, (const void **)&ptr, &len)) return (NULL); d = WS_Alloc(ctx->ws, len+1); if (d == NULL) { VRT_fail(ctx, "proxy.TLV: out of workspace"); return (NULL); } - memcpy(d, dst, len); + AN(ptr); + memcpy(d, ptr, len); d[len] = '\0'; return (d); } From phk at FreeBSD.org Tue Dec 13 09:43:06 2022 From: phk at 
FreeBSD.org (Poul-Henning Kamp) Date: Tue, 13 Dec 2022 09:43:06 +0000 (UTC) Subject: [master] 26dfe252b Fix misplaced const. Message-ID: <20221213094306.905B711A8D8@lists.varnish-cache.org> commit 26dfe252bc9282809cc525e720de5c2c69b2586f Author: Poul-Henning Kamp Date: Tue Dec 13 09:42:27 2022 +0000 Fix misplaced const. Fixes #3882 diff --git a/include/vtree.h b/include/vtree.h index 66bfd44cc..d36233137 100644 --- a/include/vtree.h +++ b/include/vtree.h @@ -448,7 +448,7 @@ struct { \ #define VRBT_PROTOTYPE_INSERT(name, type, attr) \ attr struct type *name##_VRBT_INSERT(struct name *, struct type *) #define VRBT_PROTOTYPE_FIND(name, type, attr) \ - attr const struct type *name##_VRBT_FIND(const struct name *, const struct type *) + attr struct type *name##_VRBT_FIND(const struct name *, const struct type *) #define VRBT_PROTOTYPE_NFIND(name, type, attr) \ attr struct type *name##_VRBT_NFIND(struct name *, struct type *) #define VRBT_PROTOTYPE_NEXT(name, type, attr) \ @@ -462,7 +462,7 @@ struct { \ attr struct type *name##_VRBT_INSERT_PREV(struct name *, \ struct type *, struct type *) #define VRBT_PROTOTYPE_MINMAX(name, type, attr) \ - attr const struct type *name##_VRBT_MINMAX(struct name *, int) + attr struct type *name##_VRBT_MINMAX(const struct name *, int) #define VRBT_PROTOTYPE_REINSERT(name, type, attr) \ attr struct type *name##_VRBT_REINSERT(struct name *, struct type *) From phk at FreeBSD.org Tue Dec 13 15:33:05 2022 From: phk at FreeBSD.org (Poul-Henning Kamp) Date: Tue, 13 Dec 2022 15:33:05 +0000 (UTC) Subject: [master] 403c05c1c Clarify the difference between `objiterator` and `objiterate` in function names. Message-ID: <20221213153305.6360065C59@lists.varnish-cache.org> commit 403c05c1c8110b84a33178d7603a2d7005fd1657 Author: Poul-Henning Kamp Date: Tue Dec 13 12:57:01 2022 +0000 Clarify the difference between `objiterator` and `objiterate` in function names. diff --git a/bin/varnishd/cache/cache_deliver_proc.c b/bin/varnishd/cache/cache_deliver_proc.c index 2749b798e..44507eb5a 100644 --- a/bin/varnishd/cache/cache_deliver_proc.c +++ b/bin/varnishd/cache/cache_deliver_proc.c @@ -215,7 +215,7 @@ VDP_Close(struct vdp_ctx *vdc) /*--------------------------------------------------------------------*/ static int v_matchproto_(objiterate_f) -vdp_objiterator(void *priv, unsigned flush, const void *ptr, ssize_t len) +vdp_objiterate(void *priv, unsigned flush, const void *ptr, ssize_t len) { enum vdp_action act; @@ -241,7 +241,7 @@ VDP_DeliverObj(struct vdp_ctx *vdc, struct objcore *oc) AN(vdc->vsl); vdc->req = NULL; final = oc->flags & (OC_F_PRIVATE | OC_F_HFM | OC_F_HFP) ? 
1 : 0; - r = ObjIterate(vdc->wrk, oc, vdc, vdp_objiterator, final); + r = ObjIterate(vdc->wrk, oc, vdc, vdp_objiterate, final); if (r < 0) return (r); return (0); diff --git a/bin/varnishd/cache/cache_fetch.c b/bin/varnishd/cache/cache_fetch.c index 2875d2f91..343a28102 100644 --- a/bin/varnishd/cache/cache_fetch.c +++ b/bin/varnishd/cache/cache_fetch.c @@ -764,7 +764,7 @@ vbf_stp_fetchend(struct worker *wrk, struct busyobj *bo) */ static int v_matchproto_(objiterate_f) -vbf_objiterator(void *priv, unsigned flush, const void *ptr, ssize_t len) +vbf_objiterate(void *priv, unsigned flush, const void *ptr, ssize_t len) { struct busyobj *bo; ssize_t l; @@ -852,7 +852,7 @@ vbf_stp_condfetch(struct worker *wrk, struct busyobj *bo) ObjSetState(wrk, oc, BOS_STREAM); } - if (ObjIterate(wrk, stale_oc, bo, vbf_objiterator, 0)) + if (ObjIterate(wrk, stale_oc, bo, vbf_objiterate, 0)) (void)VFP_Error(bo->vfc, "Template object failed"); if (bo->vfc->failed) { From phk at FreeBSD.org Tue Dec 13 15:33:05 2022 From: phk at FreeBSD.org (Poul-Henning Kamp) Date: Tue, 13 Dec 2022 15:33:05 +0000 (UTC) Subject: [master] 4c64c8846 No news from the CHERI front Message-ID: <20221213153305.9247765C5C@lists.varnish-cache.org> commit 4c64c88465c03d41d6fcd8c22f346f723310a972 Author: Poul-Henning Kamp Date: Tue Dec 13 15:32:10 2022 +0000 No news from the CHERI front diff --git a/doc/sphinx/phk/cheri7.rst b/doc/sphinx/phk/cheri7.rst new file mode 100644 index 000000000..222f1e141 --- /dev/null +++ b/doc/sphinx/phk/cheri7.rst @@ -0,0 +1,30 @@ +.. _phk_cheri_7: + +How Varnish met CHERI 7/N +========================= + +Not much news +------------- + +I have been occupied with other activities, so there isn't much +news about CHERI to report, except that I have still not been +able to find any actual bugs in Varnish with it. + +Having spent almost two decades with the quality knob stuck at "11" +that is how it should be, and there's no denying that I am a little +bit proud of that. + +But it also means I do not have a bug killed, classified and on +display for the rest of the world to see just how amazing CHERI is, +and why it should become standard in all computers: Embedded, +handheld, servers, development, test and production. + +Revisiting obscure corners of Varnish has caused me to commit +a few of "spit&polish" changes, including an API which should +have returned ``const`` but did not. + +But I have not given up yet, and I will find a good example of what +CHERI can do, sooner or later, but it may not be in the Varnish +source code. + +*/phk* diff --git a/doc/sphinx/phk/index.rst b/doc/sphinx/phk/index.rst index 4b792547e..0aa550b48 100644 --- a/doc/sphinx/phk/index.rst +++ b/doc/sphinx/phk/index.rst @@ -13,6 +13,7 @@ You may or may not want to know what Poul-Henning thinks. .. 
toctree:: :maxdepth: 1 + cheri7.rst cheri6.rst cheri5.rst cheri4.rst From phk at FreeBSD.org Mon Dec 19 12:24:09 2022 From: phk at FreeBSD.org (Poul-Henning Kamp) Date: Mon, 19 Dec 2022 12:24:09 +0000 (UTC) Subject: [master] 89d697b28 Cherry-pick the trivial parts of zlib 1.2.13 Message-ID: <20221219122409.2BF1111CB61@lists.varnish-cache.org> commit 89d697b2812feebaf602d0f721fb89466879d0f7 Author: Poul-Henning Kamp Date: Mon Dec 19 10:36:53 2022 +0000 Cherry-pick the trivial parts of zlib 1.2.13 diff --git a/lib/libvgz/crc32.c b/lib/libvgz/crc32.c index 0f59da66a..aee728a25 100644 --- a/lib/libvgz/crc32.c +++ b/lib/libvgz/crc32.c @@ -647,8 +647,8 @@ unsigned long ZEXPORT crc32_z(crc, buf, len) len &= 7; /* Do three interleaved CRCs to realize the throughput of one crc32x - instruction per cycle. Each CRC is calcuated on Z_BATCH words. The three - CRCs are combined into a single CRC after each set of batches. */ + instruction per cycle. Each CRC is calculated on Z_BATCH words. The + three CRCs are combined into a single CRC after each set of batches. */ while (num >= 3 * Z_BATCH) { crc1 = 0; crc2 = 0; diff --git a/lib/libvgz/deflate.c b/lib/libvgz/deflate.c index 69222e189..43dede2bb 100644 --- a/lib/libvgz/deflate.c +++ b/lib/libvgz/deflate.c @@ -292,6 +292,8 @@ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy, if (windowBits < 0) { /* suppress zlib wrapper */ wrap = 0; + if (windowBits < -15) + return Z_STREAM_ERROR; windowBits = -windowBits; } #ifdef GZIP @@ -690,36 +692,50 @@ int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain) } /* ========================================================================= - * For the default windowBits of 15 and memLevel of 8, this function returns - * a close to exact, as well as small, upper bound on the compressed size. - * They are coded as constants here for a reason--if the #define's are - * changed, then this function needs to be changed as well. The return - * value for 15 and 8 only works for those exact settings. + * For the default windowBits of 15 and memLevel of 8, this function returns a + * close to exact, as well as small, upper bound on the compressed size. This + * is an expansion of ~0.03%, plus a small constant. * - * For any setting other than those defaults for windowBits and memLevel, - * the value returned is a conservative worst case for the maximum expansion - * resulting from using fixed blocks instead of stored blocks, which deflate - * can emit on compressed data for some combinations of the parameters. + * For any setting other than those defaults for windowBits and memLevel, one + * of two worst case bounds is returned. This is at most an expansion of ~4% or + * ~13%, plus a small constant. * - * This function could be more sophisticated to provide closer upper bounds for - * every combination of windowBits and memLevel. But even the conservative - * upper bound of about 14% expansion does not seem onerous for output buffer - * allocation. + * Both the 0.03% and 4% derive from the overhead of stored blocks. The first + * one is for stored blocks of 16383 bytes (memLevel == 8), whereas the second + * is for stored blocks of 127 bytes (the worst case memLevel == 1). The + * expansion results from five bytes of header for each stored block. + * + * The larger expansion of 13% results from a window size less than or equal to + * the symbols buffer size (windowBits <= memLevel + 7). 
In that case some of + * the data being compressed may have slid out of the sliding window, impeding + * a stored block from being emitted. Then the only choice is a fixed or + * dynamic block, where a fixed block limits the maximum expansion to 9 bits + * per 8-bit byte, plus 10 bits for every block. The smallest block size for + * which this can occur is 255 (memLevel == 2). + * + * Shifts are used to approximate divisions, for speed. */ uLong ZEXPORT deflateBound(strm, sourceLen) z_streamp strm; uLong sourceLen; { deflate_state *s; - uLong complen, wraplen; + uLong fixedlen, storelen, wraplen; + + /* upper bound for fixed blocks with 9-bit literals and length 255 + (memLevel == 2, which is the lowest that may not use stored blocks) -- + ~13% overhead plus a small constant */ + fixedlen = sourceLen + (sourceLen >> 3) + (sourceLen >> 8) + + (sourceLen >> 9) + 4; - /* conservative upper bound for compressed data */ - complen = sourceLen + - ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 5; + /* upper bound for stored blocks with length 127 (memLevel == 1) -- + ~4% overhead plus a small constant */ + storelen = sourceLen + (sourceLen >> 5) + (sourceLen >> 7) + + (sourceLen >> 11) + 7; - /* if can't get parameters, return conservative bound plus zlib wrapper */ + /* if can't get parameters, return larger bound plus a zlib wrapper */ if (deflateStateCheck(strm)) - return complen + 6; + return (fixedlen > storelen ? fixedlen : storelen) + 6; /* compute wrapper length */ s = strm->state; @@ -756,11 +772,12 @@ uLong ZEXPORT deflateBound(strm, sourceLen) wraplen = 6; } - /* if not default parameters, return conservative bound */ + /* if not default parameters, return one of the conservative bounds */ if (s->w_bits != 15 || s->hash_bits != 8 + 7) - return complen + wraplen; + return (s->w_bits <= s->hash_bits ? fixedlen : storelen) + wraplen; - /* default settings: return tight bound for that case */ + /* default settings: return tight bound for that case -- ~0.03% overhead + plus a small constant */ return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) + (sourceLen >> 25) + 13 - 6 + wraplen; } @@ -1353,7 +1370,8 @@ local uInt longest_match(s, cur_match) */ if ((uInt)nice_match > s->lookahead) nice_match = (int)s->lookahead; - Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); + Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD, + "need lookahead"); do { Assert(cur_match < s->strstart, "no future"); @@ -1377,7 +1395,7 @@ local uInt longest_match(s, cur_match) /* It is not necessary to compare scan[2] and match[2] since they are * always equal when the other bytes match, given that the hash keys * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at - * strstart+3, +5, ... up to strstart+257. We check for insufficient + * strstart + 3, + 5, up to strstart + 257. We check for insufficient * lookahead only every 4th comparison; the 128th check will be made * at strstart+257. 
If MAX_MATCH-2 is not a multiple of 8, it is * necessary to put more guard bytes at the end of the window, or @@ -1394,7 +1412,8 @@ local uInt longest_match(s, cur_match) /* The funny "do {}" generates better code on most compilers */ /* Here, scan <= window+strstart+257 */ - Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); + Assert(scan <= s->window + (unsigned)(s->window_size - 1), + "wild scan"); if (*scan == *match) scan++; len = (MAX_MATCH - 1) - (int)(strend-scan); @@ -1426,7 +1445,8 @@ local uInt longest_match(s, cur_match) *++scan == *++match && *++scan == *++match && scan < strend); - Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); + Assert(scan <= s->window + (unsigned)(s->window_size - 1), + "wild scan"); len = MAX_MATCH - (int)(strend - scan); scan = strend - MAX_MATCH; @@ -1471,7 +1491,8 @@ local uInt longest_match(s, cur_match) */ Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); - Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); + Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD, + "need lookahead"); Assert(cur_match < s->strstart, "no future"); @@ -1717,7 +1738,7 @@ local void fill_window(s) * * deflate_stored() is written to minimize the number of times an input byte is * copied. It is most efficient with large input and output buffers, which - * maximizes the opportunites to have a single copy from next_in to next_out. + * maximizes the opportunities to have a single copy from next_in to next_out. */ local block_state deflate_stored(s, flush) deflate_state *s; @@ -2179,7 +2200,8 @@ local block_state deflate_rle(s, flush) if (s->match_length > s->lookahead) s->match_length = s->lookahead; } - Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan"); + Assert(scan <= s->window + (uInt)(s->window_size - 1), + "wild scan"); } /* Emit match if have run of MIN_MATCH or longer, else emit literal */ diff --git a/lib/libvgz/deflate.h b/lib/libvgz/deflate.h index 17c226113..1a06cd5f2 100644 --- a/lib/libvgz/deflate.h +++ b/lib/libvgz/deflate.h @@ -329,8 +329,8 @@ void ZLIB_INTERNAL _tr_stored_block OF((deflate_state *s, charf *buf, # define _tr_tally_dist(s, distance, length, flush) \ { uch len = (uch)(length); \ ush dist = (ush)(distance); \ - s->sym_buf[s->sym_next++] = dist; \ - s->sym_buf[s->sym_next++] = dist >> 8; \ + s->sym_buf[s->sym_next++] = (uch)dist; \ + s->sym_buf[s->sym_next++] = (uch)(dist >> 8); \ s->sym_buf[s->sym_next++] = len; \ dist--; \ s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \ diff --git a/lib/libvgz/inflate.c b/lib/libvgz/inflate.c index 9577e8312..5cdbe8b95 100644 --- a/lib/libvgz/inflate.c +++ b/lib/libvgz/inflate.c @@ -171,6 +171,8 @@ int windowBits; /* extract wrap request from windowBits parameter */ if (windowBits < 0) { + if (windowBits < -15) + return Z_STREAM_ERROR; wrap = 0; windowBits = -windowBits; } diff --git a/lib/libvgz/inftrees.c b/lib/libvgz/inftrees.c index 761253942..2cac3d8be 100644 --- a/lib/libvgz/inftrees.c +++ b/lib/libvgz/inftrees.c @@ -63,7 +63,7 @@ unsigned short FAR *work; 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; static const unsigned short lext[31] = { /* Length codes 257..285 extra */ 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, - 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 199, 202}; + 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 194, 65}; static const unsigned short dbase[32] = { /* Distance codes 0..29 base */ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 
129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, diff --git a/lib/libvgz/inftrees.h b/lib/libvgz/inftrees.h index baa53a0b1..f53665311 100644 --- a/lib/libvgz/inftrees.h +++ b/lib/libvgz/inftrees.h @@ -38,7 +38,7 @@ typedef struct { /* Maximum size of the dynamic table. The maximum number of code structures is 1444, which is the sum of 852 for literal/length codes and 592 for distance codes. These values were found by exhaustive searches using the program - examples/enough.c found in the zlib distribtution. The arguments to that + examples/enough.c found in the zlib distribution. The arguments to that program are the number of symbols, the initial root table size, and the maximum bit length of a code. "enough 286 9 15" for literal/length codes returns returns 852, and "enough 30 6 15" for distance codes returns 592. diff --git a/lib/libvgz/trees.c b/lib/libvgz/trees.c index 6ae8dcf47..1ccce63e6 100644 --- a/lib/libvgz/trees.c +++ b/lib/libvgz/trees.c @@ -312,7 +312,7 @@ local void tr_static_init() } /* =========================================================================== - * Genererate the file trees.h describing the static trees. + * Generate the file trees.h describing the static trees. */ #ifdef GEN_TREES_H # ifndef ZLIB_DEBUG @@ -807,8 +807,8 @@ local int build_bl_tree(s) /* Build the bit length tree: */ build_tree(s, (tree_desc *)(&(s->bl_desc))); - /* opt_len now includes the length of the tree representations, except - * the lengths of the bit lengths codes and the 5+5+4 bits for the counts. + /* opt_len now includes the length of the tree representations, except the + * lengths of the bit lengths codes and the 5 + 5 + 4 bits for the counts. */ /* Determine the number of bit length codes to send. The pkzip format @@ -961,7 +961,10 @@ void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last) opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, s->sym_next / 3)); - if (static_lenb <= opt_lenb) opt_lenb = static_lenb; +#ifndef FORCE_STATIC + if (static_lenb <= opt_lenb || s->strategy == Z_FIXED) +#endif + opt_lenb = static_lenb; } else { Assert(buf != (char*)0, "lost buf"); @@ -982,11 +985,7 @@ void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last) */ _tr_stored_block(s, buf, stored_len, last); -#ifdef FORCE_STATIC - } else if (static_lenb >= 0) { /* force static trees */ -#else - } else if (s->strategy == Z_FIXED || static_lenb == opt_lenb) { -#endif + } else if (static_lenb == opt_lenb) { send_bits(s, (STATIC_TREES<<1)+last, 3); compress_block(s, (const ct_data *)static_ltree, (const ct_data *)static_dtree); @@ -1028,11 +1027,11 @@ void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last) int ZLIB_INTERNAL _tr_tally (s, dist, lc) deflate_state *s; unsigned dist; /* distance of matched string */ - unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ + unsigned lc; /* match length - MIN_MATCH or unmatched char (dist==0) */ { - s->sym_buf[s->sym_next++] = dist; - s->sym_buf[s->sym_next++] = dist >> 8; - s->sym_buf[s->sym_next++] = lc; + s->sym_buf[s->sym_next++] = (uch)dist; + s->sym_buf[s->sym_next++] = (uch)(dist >> 8); + s->sym_buf[s->sym_next++] = (uch)lc; if (dist == 0) { /* lc is the unmatched char */ s->dyn_ltree[lc].Freq++; @@ -1074,7 +1073,7 @@ local void compress_block(s, ltree, dtree) } else { /* Here, lc is the match length - MIN_MATCH */ code = _length_code[lc]; - send_code(s, code+LITERALS+1, ltree); /* send the length code */ + send_code(s, code + LITERALS + 1, ltree); /* send length code */ extra = 
extra_lbits[code]; if (extra != 0) { lc -= base_length[code]; diff --git a/lib/libvgz/vgz.h b/lib/libvgz/vgz.h index b259aac34..eff35f22a 100644 --- a/lib/libvgz/vgz.h +++ b/lib/libvgz/vgz.h @@ -281,7 +281,7 @@ ZEXTERN int ZEXPORT deflate OF((z_streamp strm, int flush)); == 0), or after each call of deflate(). If deflate returns Z_OK and with zero avail_out, it must be called again after making room in the output buffer because there might be more output pending. See deflatePending(), - which can be used if desired to determine whether or not there is more ouput + which can be used if desired to determine whether or not there is more output in that case. Normally the parameter flush is set to Z_NO_FLUSH, which allows deflate to @@ -665,7 +665,7 @@ ZEXTERN int ZEXPORT deflateGetDictionary OF((z_streamp strm, to dictionary. dictionary must have enough space, where 32768 bytes is always enough. If deflateGetDictionary() is called with dictionary equal to Z_NULL, then only the dictionary length is returned, and nothing is copied. - Similary, if dictLength is Z_NULL, then it is not set. + Similarly, if dictLength is Z_NULL, then it is not set. deflateGetDictionary() may return a length less than the window size, even when more than the window size in input has been provided. It may return up @@ -920,7 +920,7 @@ ZEXTERN int ZEXPORT inflateGetDictionary OF((z_streamp strm, to dictionary. dictionary must have enough space, where 32768 bytes is always enough. If inflateGetDictionary() is called with dictionary equal to Z_NULL, then only the dictionary length is returned, and nothing is copied. - Similary, if dictLength is Z_NULL, then it is not set. + Similarly, if dictLength is Z_NULL, then it is not set. inflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the stream state is inconsistent. @@ -1442,12 +1442,12 @@ ZEXTERN z_size_t ZEXPORT gzfread OF((voidp buf, z_size_t size, z_size_t nitems, In the event that the end of file is reached and only a partial item is available at the end, i.e. the remaining uncompressed data length is not a - multiple of size, then the final partial item is nevetheless read into buf + multiple of size, then the final partial item is nevertheless read into buf and the end-of-file flag is set. The length of the partial item read is not provided, but could be inferred from the result of gztell(). This behavior is the same as the behavior of fread() implementations in common libraries, but it prevents the direct use of gzfread() to read a concurrently written - file, reseting and retrying on end-of-file, when size is not 1. + file, resetting and retrying on end-of-file, when size is not 1. */ ZEXTERN int ZEXPORT gzwrite OF((gzFile file, voidpc buf, unsigned len)); From phk at FreeBSD.org Mon Dec 19 12:24:09 2022 From: phk at FreeBSD.org (Poul-Henning Kamp) Date: Mon, 19 Dec 2022 12:24:09 +0000 (UTC) Subject: [master] 1c98c029c The (potentially) substantial parts of zlib 1.2.13 Message-ID: <20221219122409.525A911CB64@lists.varnish-cache.org> commit 1c98c029c0dfe615d67ce4b8d009dd2fc63c7204 Author: Poul-Henning Kamp Date: Mon Dec 19 12:23:18 2022 +0000 The (potentially) substantial parts of zlib 1.2.13 diff --git a/lib/libvgz/crc32.c b/lib/libvgz/crc32.c index aee728a25..6eab5e951 100644 --- a/lib/libvgz/crc32.c +++ b/lib/libvgz/crc32.c @@ -98,13 +98,22 @@ # endif #endif +/* If available, use the ARM processor CRC32 instruction. */ +#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32) && W == 8 +# define ARMCRC32 +#endif + /* Local functions. 
*/ local z_crc_t multmodp OF((z_crc_t a, z_crc_t b)); local z_crc_t x2nmodp OF((z_off64_t n, unsigned k)); -/* If available, use the ARM processor CRC32 instruction. */ -#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32) && W == 8 -# define ARMCRC32 +#if defined(W) && (!defined(ARMCRC32) || defined(DYNAMIC_CRC_TABLE)) + local z_word_t byte_swap OF((z_word_t word)); +#endif + +#if defined(W) && !defined(ARMCRC32) + local z_crc_t crc_word OF((z_word_t data)); + local z_word_t crc_word_big OF((z_word_t data)); #endif #if defined(W) && (!defined(ARMCRC32) || defined(DYNAMIC_CRC_TABLE)) @@ -1092,7 +1101,7 @@ uLong ZEXPORT crc32_combine(crc1, crc2, len2) uLong crc2; z_off_t len2; { - return crc32_combine64(crc1, crc2, len2); + return crc32_combine64(crc1, crc2, (z_off64_t)len2); } /* ========================================================================= */ @@ -1109,11 +1118,11 @@ uLong ZEXPORT crc32_combine_gen64(len2) uLong ZEXPORT crc32_combine_gen(len2) z_off_t len2; { - return crc32_combine_gen64(len2); + return crc32_combine_gen64((z_off64_t)len2); } /* ========================================================================= */ -uLong crc32_combine_op(crc1, crc2, op) +uLong ZEXPORT crc32_combine_op(crc1, crc2, op) uLong crc1; uLong crc2; uLong op; diff --git a/lib/libvgz/deflate.c b/lib/libvgz/deflate.c index 43dede2bb..ed9ac5190 100644 --- a/lib/libvgz/deflate.c +++ b/lib/libvgz/deflate.c @@ -53,7 +53,7 @@ extern const char deflate_copyright[]; const char deflate_copyright[] = - " deflate 1.2.12 Copyright 1995-2022 Jean-loup Gailly and Mark Adler "; + " deflate 1.2.13 Copyright 1995-2022 Jean-loup Gailly and Mark Adler "; /* If you use the zlib library in a product, an acknowledgment is welcome in the documentation of your product. If for some reason you cannot @@ -90,13 +90,7 @@ local void lm_init OF((deflate_state *s)); local void putShortMSB OF((deflate_state *s, uInt b)); local void flush_pending OF((z_streamp strm)); local unsigned read_buf OF((z_streamp strm, Bytef *buf, unsigned size)); -#ifdef ASMV -# pragma message("Assembler code may have bugs -- use at your own risk") - void match_init OF((void)); /* asm code initialization */ - uInt longest_match OF((deflate_state *s, IPos cur_match)); -#else local uInt longest_match OF((deflate_state *s, IPos cur_match)); -#endif #ifdef ZLIB_DEBUG local void check_match OF((deflate_state *s, IPos start, IPos match, @@ -1304,11 +1298,6 @@ local void lm_init (s) s->match_length = s->prev_length = MIN_MATCH-1; s->match_available = 0; s->ins_h = 0; -#ifndef FASTEST -#ifdef ASMV - match_init(); /* initialize the asm code */ -#endif -#endif } #ifndef FASTEST @@ -1321,10 +1310,6 @@ local void lm_init (s) * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 * OUT assertion: the match length is not greater than s->lookahead. */ -#ifndef ASMV -/* For 80x86 and 680x0, an optimized version will be provided in match.asm or - * match.S. The code will be functionally equivalent. 
- */ local uInt longest_match(s, cur_match) deflate_state *s; IPos cur_match; /* current match */ @@ -1470,7 +1455,6 @@ local uInt longest_match(s, cur_match) if ((uInt)best_len <= s->lookahead) return (uInt)best_len; return s->lookahead; } -#endif /* ASMV */ #else /* FASTEST */ diff --git a/lib/libvgz/inftrees.c b/lib/libvgz/inftrees.c index 2cac3d8be..18faf783c 100644 --- a/lib/libvgz/inftrees.c +++ b/lib/libvgz/inftrees.c @@ -10,7 +10,7 @@ extern const char inflate_copyright[]; const char inflate_copyright[] = - " inflate 1.2.12 Copyright 1995-2022 Mark Adler "; + " inflate 1.2.13 Copyright 1995-2022 Mark Adler "; /* If you use the zlib library in a product, an acknowledgment is welcome in the documentation of your product. If for some reason you cannot diff --git a/lib/libvgz/vgz.h b/lib/libvgz/vgz.h index eff35f22a..c545df07a 100644 --- a/lib/libvgz/vgz.h +++ b/lib/libvgz/vgz.h @@ -1,5 +1,5 @@ /* zlib.h -- interface of the 'zlib' general purpose compression library - version 1.2.12, March 11th, 2022 + version 1.2.13, October 13th, 2022 Copyright (C) 1995-2022 Jean-loup Gailly and Mark Adler @@ -37,11 +37,11 @@ extern "C" { #endif -#define ZLIB_VERSION "1.2.12" -#define ZLIB_VERNUM 0x12c0 +#define ZLIB_VERSION "1.2.13" +#define ZLIB_VERNUM 0x12d0 #define ZLIB_VER_MAJOR 1 #define ZLIB_VER_MINOR 2 -#define ZLIB_VER_REVISION 12 +#define ZLIB_VER_REVISION 13 #define ZLIB_VER_SUBREVISION 0 /* @@ -1864,10 +1864,9 @@ ZEXTERN int ZEXPORT gzgetc_ OF((gzFile file)); /* backward compatibility */ ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile)); ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off64_t)); ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off64_t)); + ZEXTERN uLong ZEXPORT crc32_combine_gen64 OF((z_off64_t)); #endif -ZEXTERN uLong ZEXPORT crc32_combine_gen64 OF((z_off64_t)); - #if !defined(ZLIB_INTERNAL) && defined(Z_WANT64) # ifdef Z_PREFIX_SET # define z_gzopen z_gzopen64 diff --git a/lib/libvgz/zconf.h b/lib/libvgz/zconf.h index 5113d217e..bf977d3e7 100644 --- a/lib/libvgz/zconf.h +++ b/lib/libvgz/zconf.h @@ -8,12 +8,6 @@ #ifndef ZCONF_H #define ZCONF_H -#if defined(Z_SOLO) -#include -#define Z_U8 __uint64_t -#define Z_U4 __uint32_t -#endif - /* * If you *really* need a unique prefix for all types and library functions, * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it. @@ -44,6 +38,9 @@ # define crc32 z_crc32 # define crc32_combine z_crc32_combine # define crc32_combine64 z_crc32_combine64 +# define crc32_combine_gen z_crc32_combine_gen +# define crc32_combine_gen64 z_crc32_combine_gen64 +# define crc32_combine_op z_crc32_combine_op # define crc32_z z_crc32_z # define deflate z_deflate # define deflateBound z_deflateBound @@ -355,6 +352,9 @@ # ifdef FAR # undef FAR # endif +# ifndef WIN32_LEAN_AND_MEAN +# define WIN32_LEAN_AND_MEAN +# endif # include /* No need for _export, use ZLIB.DEF instead. */ /* For complete Windows compatibility, use WINAPI, not __stdcall. 
*/ @@ -473,11 +473,18 @@ typedef uLong FAR uLongf; # undef _LARGEFILE64_SOURCE #endif -#if defined(__WATCOMC__) && !defined(Z_HAVE_UNISTD_H) -# define Z_HAVE_UNISTD_H +#ifndef Z_HAVE_UNISTD_H +# ifdef __WATCOMC__ +# define Z_HAVE_UNISTD_H +# endif +#endif +#ifndef Z_HAVE_UNISTD_H +# if defined(_LARGEFILE64_SOURCE) && !defined(_WIN32) +# define Z_HAVE_UNISTD_H +# endif #endif #ifndef Z_SOLO -# if defined(Z_HAVE_UNISTD_H) || defined(_LARGEFILE64_SOURCE) +# if defined(Z_HAVE_UNISTD_H) # include /* for SEEK_*, off_t, and _LFS64_LARGEFILE */ # ifdef VMS # include /* for off_t */ diff --git a/lib/libvgz/zutil.c b/lib/libvgz/zutil.c index 7c300fdaf..fd0cda93b 100644 --- a/lib/libvgz/zutil.c +++ b/lib/libvgz/zutil.c @@ -62,9 +62,11 @@ uLong ZEXPORT zlibCompileFlags() #ifdef ZLIB_DEBUG flags += 1 << 8; #endif + /* #if defined(ASMV) || defined(ASMINF) flags += 1 << 9; #endif + */ #ifdef ZLIB_WINAPI flags += 1 << 10; #endif diff --git a/lib/libvgz/zutil.h b/lib/libvgz/zutil.h index 1c80e3842..03cdb4d38 100644 --- a/lib/libvgz/zutil.h +++ b/lib/libvgz/zutil.h @@ -193,6 +193,7 @@ extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */ (!defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0) ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t)); ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t)); + ZEXTERN uLong ZEXPORT crc32_combine_gen64 OF((z_off_t)); #endif /* common defaults */