[master] 5db61e045 Tune test cases c97-c99

Martin Blix Grydeland martin at varnish-software.com
Fri Apr 26 08:51:08 UTC 2019


commit 5db61e0457334392fdc4da8bb5a065a3a191692a
Author: Martin Blix Grydeland <martin at varnish-software.com>
Date:   Thu Apr 25 14:47:39 2019 +0200

    Tune test cases c97-c99
    
    This reverts some of the previous attempts to get these test cases
    stable, as those attempts actually prevented the desired code paths
    from being tested.
    
    Also make the test cases wait until the required requests are on the
    waitinglist before continuing.
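
The synchronization pattern described above can be reduced to a minimal
sketch, shown here for reference only (it is not part of the commit; the
test name and the single waiting client are illustrative assumptions, but
the barrier primitives and the busy_sleep counter are the same ones the
tests below rely on):

    varnishtest "Waitinglist wait sketch"

    barrier b1 cond 2

    server s1 {
        rxreq
        # Hold the response so the object stays busy while c2 queues up
        barrier b1 sync
        txresp
    } -start

    varnish v1 -vcl+backend { } -start

    client c1 {
        txreq
        rxresp
    } -start

    client c2 {
        txreq
        rxresp
    } -start

    # Wait until c2 has found the busy object and gone to sleep
    delay 1
    varnish v1 -expect busy_sleep == 1

    # Release s1's response; HSH_Unbusy will rush the waitinglist
    barrier b1 sync

    client c1 -wait
    client c2 -wait

The delay gives the queued request time to reach the waitinglist before the
counter is checked, and only then is the barrier released, so the request is
known to be queued before s1 sends its response headers.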

diff --git a/bin/varnishtest/tests/c00097.vtc b/bin/varnishtest/tests/c00097.vtc
index 52469a690..afc77fbd9 100644
--- a/bin/varnishtest/tests/c00097.vtc
+++ b/bin/varnishtest/tests/c00097.vtc
@@ -1,49 +1,64 @@
 varnishtest "Streaming delivery and waitinglist rushing"
 
-barrier b1 sock 4
-barrier b2 sock 4
+# Barrier to make sure that c1 connects to s1
+barrier b1 cond 2
+
+# Barrier to make sure that all requests are on the waitinglist before
+# HSH_Unbusy is called
+barrier b2 cond 2
+
+# Barrier to control that all requests start streaming before the object
+# finishes. This tests that waitinglists are rushed before
+# HSH_DerefObjCore().
+barrier b3 sock 4
 
 server s1 {
 	rxreq
+	barrier b1 sync
+	barrier b2 sync
 	txresp -nolen -hdr "Transfer-Encoding: chunked"
 	chunkedlen 10
-	barrier b1 sync
+	barrier b3 sync
 	chunkedlen 10
 	chunkedlen 0
 } -start
 
-varnish v1 -arg "-p rush_exponent=2" -arg "-p debug=+syncvsl" -vcl+backend {
+varnish v1 -arg "-p thread_pools=1" -arg "-p thread_pool_min=20" -arg "-p rush_exponent=2" -arg "-p debug=+syncvsl" -arg "-p debug=+waitinglist" -vcl+backend {
 	import vtc;
 	sub vcl_hit {
-		vtc.barrier_sync("${b1_sock}");
+		vtc.barrier_sync("${b3_sock}");
 	}
 } -start
 
 client c1 {
 	txreq
-	rxresp -no_obj
-	barrier b2 sync
-	rxrespbody
+	rxresp
 } -start
 
+barrier b1 sync
+
 client c2 {
-	barrier b2 sync
 	txreq
 	rxresp
 } -start
 
 client c3 {
-	barrier b2 sync
 	txreq
 	rxresp
 } -start
 
 client c4 {
-	barrier b2 sync
 	txreq
 	rxresp
 } -start
 
+# Wait until c2-c4 are on the waitinglist
+delay 1
+varnish v1 -expect busy_sleep == 3
+
+# Open up the response headers from s1, and as a result HSH_Unbusy is called
+barrier b2 sync
+
 client c1 -wait
 client c2 -wait
 client c3 -wait
diff --git a/bin/varnishtest/tests/c00098.vtc b/bin/varnishtest/tests/c00098.vtc
index 32a809766..5b34d3a75 100644
--- a/bin/varnishtest/tests/c00098.vtc
+++ b/bin/varnishtest/tests/c00098.vtc
@@ -1,13 +1,24 @@
 varnishtest "Hit-for-pass and waitinglist rushing"
 
-barrier b2 cond 6
+# Barrier to make sure that s1 is run first
+barrier b1 cond 2
+
+# Barrier to make sure that all requests are on the waitinglist before
+# HSH_Unbusy is called
+barrier b2 cond 2
+
+# Barrier to control that all backends are reached before any request
+# finishes. This tests that waitinglists are rushed before
+# HSH_DerefObjCore().
 barrier b3 cond 6
 
 server s1 {
 	rxreq
+	barrier b1 sync
+	barrier b2 sync
 	txresp -nolen -hdr "Transfer-Encoding: chunked"
 	chunkedlen 10
-	barrier b2 sync
+	barrier b3 sync
 	chunkedlen 10
 	chunkedlen 0
 } -start
@@ -16,7 +27,7 @@ server s2 {
 	rxreq
 	txresp -nolen -hdr "Transfer-Encoding: chunked"
 	chunkedlen 10
-	barrier b2 sync
+	barrier b3 sync
 	chunkedlen 10
 	chunkedlen 0
 } -start
@@ -25,7 +36,7 @@ server s3 {
 	rxreq
 	txresp -nolen -hdr "Transfer-Encoding: chunked"
 	chunkedlen 10
-	barrier b2 sync
+	barrier b3 sync
 	chunkedlen 10
 	chunkedlen 0
 } -start
@@ -34,7 +45,7 @@ server s4 {
 	rxreq
 	txresp -nolen -hdr "Transfer-Encoding: chunked"
 	chunkedlen 10
-	barrier b2 sync
+	barrier b3 sync
 	chunkedlen 10
 	chunkedlen 0
 } -start
@@ -43,7 +54,7 @@ server s5 {
 	rxreq
 	txresp -nolen -hdr "Transfer-Encoding: chunked"
 	chunkedlen 10
-	barrier b2 sync
+	barrier b3 sync
 	chunkedlen 10
 	chunkedlen 0
 } -start
@@ -52,13 +63,12 @@ server s6 {
 	rxreq
 	txresp -nolen -hdr "Transfer-Encoding: chunked"
 	chunkedlen 10
-	barrier b2 sync
+	barrier b3 sync
 	chunkedlen 10
 	chunkedlen 0
 } -start
 
-varnish v1 -arg "-p thread_pool_min=20" -arg "-p rush_exponent=2" -arg "-p debug=+syncvsl" -vcl+backend {
-	import vtc;
+varnish v1 -arg "-p thread_pools=1" -arg "-p thread_pool_min=30" -arg "-p rush_exponent=2" -arg "-p debug=+syncvsl" -arg "-p debug=+waitinglist" -vcl+backend {
 	sub vcl_backend_fetch {
 		if (bereq.http.client == "1") {
 			set bereq.backend = s1;
@@ -81,50 +91,47 @@ varnish v1 -arg "-p thread_pool_min=20" -arg "-p rush_exponent=2" -arg "-p debug
 
 client c1 {
 	txreq -url /hfp -hdr "Client: 1"
-	rxresp -no_obj
-	barrier b3 sync
+	rxresp
 } -start
 
+# This makes sure that c1->s1 is done first
+barrier b1 sync
+
 client c2 {
-	barrier b3 sync
 	txreq -url /hfp -hdr "Client: 2"
 	rxresp
 } -start
 
 client c3 {
-	barrier b3 sync
 	txreq -url /hfp -hdr "Client: 3"
 	rxresp
 } -start
 
 client c4 {
-	barrier b3 sync
 	txreq -url /hfp -hdr "Client: 4"
 	rxresp
 } -start
 
 client c5 {
-	barrier b3 sync
 	txreq -url /hfp -hdr "Client: 5"
 	rxresp
 } -start
 
 client c6 {
-	barrier b3 sync
 	txreq -url /hfp -hdr "Client: 6"
 	rxresp
 } -start
 
+# Wait until c2-c6 are on the waitinglist
+delay 1
+varnish v1 -expect busy_sleep == 5
+
+# Open up the response headers from s1, and as a result HSH_Unbusy is called
+barrier b2 sync
+
 client c1 -wait
 client c2 -wait
 client c3 -wait
 client c4 -wait
 client c5 -wait
 client c6 -wait
-
-server s1 -wait
-server s2 -wait
-server s3 -wait
-server s4 -wait
-server s5 -wait
-server s6 -wait
diff --git a/bin/varnishtest/tests/c00099.vtc b/bin/varnishtest/tests/c00099.vtc
index 58363ffc3..4bbd904a0 100644
--- a/bin/varnishtest/tests/c00099.vtc
+++ b/bin/varnishtest/tests/c00099.vtc
@@ -1,10 +1,21 @@
 varnishtest "Hit-for-miss and waitinglist rushing"
 
+# Barrier to make sure that s1 is run first
+barrier b1 cond 2
+
+# Barrier to make sure that all requests are on the waitinglist before
+# HSH_Unbusy is called
+barrier b2 cond 2
+
+# Barrier to control that all backends are reached before any request
+# finishes. This tests that waitinglists are rushed before
+# HSH_DerefObjCore().
 barrier b3 cond 6
-barrier b4 cond 6
 
 server s1 {
 	rxreq
+	barrier b1 sync
+	barrier b2 sync
 	txresp -nolen -hdr "Transfer-Encoding: chunked"
 	chunkedlen 10
 	barrier b3 sync
@@ -57,8 +68,7 @@ server s6 {
 	chunkedlen 0
 } -start
 
-varnish v1 -arg "-p thread_pool_min=20" -arg "-p rush_exponent=2" -arg "-p debug=+syncvsl" -vcl+backend {
-	import vtc;
+varnish v1 -arg "-p thread_pools=1" -arg "-p thread_pool_min=30" -arg "-p rush_exponent=2" -arg "-p debug=+syncvsl" -arg "-p debug=+waitinglist" -vcl+backend {
 	sub vcl_backend_fetch {
 		if (bereq.http.client == "1") {
 			set bereq.backend = s1;
@@ -81,50 +91,47 @@ varnish v1 -arg "-p thread_pool_min=20" -arg "-p rush_exponent=2" -arg "-p debug
 
 client c1 {
 	txreq -url /hfm -hdr "Client: 1"
-	rxresp -no_obj
-	barrier b4 sync
+	rxresp
 } -start
 
+# This makes sure that c1->s1 is done first
+barrier b1 sync
+
 client c2 {
-	barrier b4 sync
 	txreq -url /hfm -hdr "Client: 2"
 	rxresp
 } -start
 
 client c3 {
-	barrier b4 sync
 	txreq -url /hfm -hdr "Client: 3"
 	rxresp
 } -start
 
 client c4 {
-	barrier b4 sync
 	txreq -url /hfm -hdr "Client: 4"
 	rxresp
 } -start
 
 client c5 {
-	barrier b4 sync
 	txreq -url /hfm -hdr "Client: 5"
 	rxresp
 } -start
 
 client c6 {
-	barrier b4 sync
 	txreq -url /hfm -hdr "Client: 6"
 	rxresp
 } -start
 
+# Wait until c2-c6 are on the waitinglist
+delay 1
+varnish v1 -expect busy_sleep == 5
+
+# Open up the response headers from s1, and as a result HSH_Unbusy is called
+barrier b2 sync
+
 client c1 -wait
 client c2 -wait
 client c3 -wait
 client c4 -wait
 client c5 -wait
 client c6 -wait
-
-server s1 -wait
-server s2 -wait
-server s3 -wait
-server s4 -wait
-server s5 -wait
-server s6 -wait

