diff --git a/native/common/common.c b/native/common/common.c
index fe7dbc3aa..f9210fd77 100644
--- a/native/common/common.c
+++ b/native/common/common.c
@@ -393,9 +393,8 @@ node_context *find_node_context_host(request_rec *r, const proxy_balancer *balan
continue;
}
context = &context_table->context_info[j];
- ap_log_error(APLOG_MARK, APLOG_TRACE4, 0, r->server,
- "find_node_context_host: %s node: %d vhost: %d context: %s", uri, context->node, context->vhost,
- context->context);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "find_node_context_host: %s node: %d vhost: %d context: %s",
+ uri, context->node, context->vhost, context->context);
}
#endif
diff --git a/native/mod_manager/mod_manager.c b/native/mod_manager/mod_manager.c
index c67eba423..1f524df1f 100644
--- a/native/mod_manager/mod_manager.c
+++ b/native/mod_manager/mod_manager.c
@@ -1216,8 +1216,9 @@ static const proxy_worker_shared *read_shared_by_node(request_rec *r, nodeinfo_t
workers = (proxy_worker **)balancer->workers->elts;
for (j = 0; j < balancer->workers->nelts; j++, workers++) {
proxy_worker *worker = *workers;
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "read_shared_by_node: Balancer %s worker %s, %s, %d",
- balancer->s->name, worker->s->route, worker->s->hostname, worker->s->port);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "read_shared_by_node: Balancer %s worker (%d) %s, %s, %d", balancer->s->name, worker->s->index,
+ worker->s->route, worker->s->hostname, worker->s->port);
if (worker->s->port == port && strcmp(worker->s->hostname, node->mess.Host) == 0 &&
strcmp(worker->s->route, node->mess.JVMRoute) == 0) {
return worker->s;
@@ -1904,7 +1905,7 @@ static char *process_info(request_rec *r, int *errtype)
proxystat = read_shared_by_node(r, ou);
if (!proxystat) {
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "process_config: No proxystat, assum zeros");
+            ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "process_info: No proxystat, assume zeros");
proxystat = apr_pcalloc(r->pool, sizeof(proxy_worker_shared));
}
diff --git a/native/mod_proxy_cluster/mod_proxy_cluster.c b/native/mod_proxy_cluster/mod_proxy_cluster.c
index 2e5bac3ef..29716c3b0 100644
--- a/native/mod_proxy_cluster/mod_proxy_cluster.c
+++ b/native/mod_proxy_cluster/mod_proxy_cluster.c
@@ -291,7 +291,8 @@ static apr_status_t create_worker_reuse(proxy_server_conf *conf, const char *ptr
helper = *helper_ptr;
if (helper->index == -1) {
/* We are going to reuse a removed one */
- ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, server, "create_worker_reuse: reusing removed worker for %s", url);
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, server, "create_worker_reuse: reusing removed worker (%d) for %s",
+ node->mess.id, url);
return APR_SUCCESS;
}
@@ -504,7 +505,7 @@ static apr_status_t create_worker(proxy_server_conf *conf, proxy_balancer *balan
}
/* No, it does not exist, so we will create a new one.
- * Note that the ap_proxy_get_worker and ap_proxy_define_worker aren't symetrical, and
+ * Note that the ap_proxy_get_worker and ap_proxy_define_worker aren't symmetrical, and
* this leaks via the conf->pool
*/
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, server, "create_worker: worker for %s Will create %d!!!", url,
@@ -797,13 +798,15 @@ static proxy_worker *get_worker_from_id_stat(const proxy_server_conf *conf, int
for (j = 0; j < balancer->workers->nelts; j++, ptrw = ptrw + sizew) {
proxy_worker **worker = (proxy_worker **)ptrw;
proxy_cluster_helper *helper = (proxy_cluster_helper *)(*worker)->context;
+
if ((*worker)->s == stat && helper->index == id) {
if (is_worker_empty(*worker)) {
return NULL;
- } else {
- return *worker;
}
+
+ return *worker;
}
+
if (helper->index == id) {
unpair_worker_node((*worker)->s, node);
helper->shared->index = -1;
@@ -1836,9 +1839,12 @@ static int proxy_node_isup(request_rec *r, int id, int load)
char *ptr;
if (node_storage->read_node(id, &node) != APR_SUCCESS) {
+        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server, "proxy_node_isup: Can't read node with id %d.", id);
return HTTP_INTERNAL_SERVER_ERROR;
}
if (node->mess.remove) {
+        ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+                     "proxy_node_isup: Node with id %d is marked for removal.", id);
return HTTP_INTERNAL_SERVER_ERROR;
}
diff --git a/test/MODCLUSTER-640/mod_lbmethod_cluster.conf b/test/MODCLUSTER-640/mod_lbmethod_cluster.conf
index 9983614ae..f83a13aaa 100644
--- a/test/MODCLUSTER-640/mod_lbmethod_cluster.conf
+++ b/test/MODCLUSTER-640/mod_lbmethod_cluster.conf
@@ -9,33 +9,32 @@ LoadModule lbmethod_cluster_module modules/mod_lbmethod_cluster.so
LoadModule watchdog_module modules/mod_watchdog.so
LogLevel info
-ServerName localhost
+ServerName httpd-mod_proxy_cluster
ProxyPreserveHost On
UseNocanon On
-
- Listen 8090
- ManagerBalancerName mycluster
+Listen 8090
+ManagerBalancerName mycluster
- EnableWsTunnel
- WSUpgradeHeader websocket
-
- EnableMCMPReceive
-
- Require ip 127.0.0.1
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
-
- SetHandler mod_cluster-manager
- Require ip 127.0.0.1
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
-
-
+EnableWsTunnel
+WSUpgradeHeader websocket
+
+
+ EnableMCMPReceive
+
+ # For podman, this gets changed to IP in httpd/run.sh
+ Require host .mod_proxy_cluster_testsuite_net
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
+
+ SetHandler mod_cluster-manager
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
+
ProxySet growth=10
diff --git a/test/MODCLUSTER-640/mod_proxy_cluster.conf b/test/MODCLUSTER-640/mod_proxy_cluster.conf
index a925f973e..f8c98f336 100644
--- a/test/MODCLUSTER-640/mod_proxy_cluster.conf
+++ b/test/MODCLUSTER-640/mod_proxy_cluster.conf
@@ -6,48 +6,34 @@ LoadModule slotmem_shm_module modules/mod_slotmem_shm.so
LoadModule manager_module modules/mod_manager.so
LoadModule proxy_cluster_module modules/mod_proxy_cluster.so
-LogLevel info
UseNocanon On
-ServerName localhost
+ServerName httpd-mod_proxy_cluster
ProxyPreserveHost On
+Listen 8090
+ManagerBalancerName mycluster
+WSUpgradeHeader websocket
+EnableWsTunnel
-
- UseAlias On
-
+CreateBalancers 0
+EnableOptions On
-
- Listen 8090
- ManagerBalancerName mycluster
+
+ EnableMCMPReceive
+
+ # For podman, this gets changed to IP in httpd/run.sh
+ Require host .mod_proxy_cluster_testsuite_net
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
-
- CreateBalancers 0
- EnableOptions On
-
+
+ SetHandler mod_cluster-manager
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
+
- EnableWsTunnel
- WSUpgradeHeader websocket
-
- EnableMCMPReceive
-
- Require ip 127.0.0.1
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
-
- SetHandler mod_cluster-manager
- Require ip 127.0.0.1
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
-
-
-
-
-
- ProxySet growth=10
- ProxySet lbmethod=cluster
-
-
+LogLevel info
diff --git a/test/MODCLUSTER-640/testit.sh b/test/MODCLUSTER-640/testit.sh
index 5f2799aea..9a3edf080 100755
--- a/test/MODCLUSTER-640/testit.sh
+++ b/test/MODCLUSTER-640/testit.sh
@@ -10,7 +10,9 @@ httpd_remove
rm -f nohup.out
MPC_CONF=${MPC_CONF:-MODCLUSTER-640/mod_proxy_cluster.conf}
-MPC_NAME=MODCLUSTER-640 httpd_start
+MPC_NAME=MODCLUSTER-640
+
+httpd_start
# wait until httpd is started
httpd_wait_until_ready || exit 1
diff --git a/test/MODCLUSTER-734/mod_lbmethod_cluster.conf b/test/MODCLUSTER-734/mod_lbmethod_cluster.conf
index 8daa7a60b..ed5ec994c 100644
--- a/test/MODCLUSTER-734/mod_lbmethod_cluster.conf
+++ b/test/MODCLUSTER-734/mod_lbmethod_cluster.conf
@@ -11,31 +11,32 @@ LoadModule watchdog_module modules/mod_watchdog.so
ProxyHCExpr in_maint {hc('body') !~ /Under maintenance/}
ModProxyClusterHCTemplate hcmethod=GET hcexpr=in_maint hcuri=/status.jsp
-Maxnode 505
-Maxhost 1010
-Maxcontext 1100
Listen 8090
ManagerBalancerName mycluster
-ServerName localhost
+ServerName httpd-mod_proxy_cluster
- EnableMCMPReceive
-
- Require ip 127.0.0.
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
-
- SetHandler mod_cluster-manager
- Require ip 127.0.0.
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
+ EnableMCMPReceive
+
+ # For podman, this gets changed to IP in httpd/run.sh
+ Require host .mod_proxy_cluster_testsuite_net
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
+
+ SetHandler mod_cluster-manager
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
- ProxySet growth=20
- ProxySet lbmethod=cluster
+ ProxySet growth=20
+ ProxySet lbmethod=cluster
+
+Maxnode 505
+Maxhost 1010
+Maxcontext 1100
diff --git a/test/MODCLUSTER-734/mod_proxy_cluster.conf b/test/MODCLUSTER-734/mod_proxy_cluster.conf
index 6a7cb1e04..6181bd61a 100644
--- a/test/MODCLUSTER-734/mod_proxy_cluster.conf
+++ b/test/MODCLUSTER-734/mod_proxy_cluster.conf
@@ -8,26 +8,27 @@ LoadModule slotmem_shm_module modules/mod_slotmem_shm.so
ProxyHCExpr in_maint {hc('body') !~ /Under maintenance/}
ModProxyClusterHCTemplate hcmethod=GET hcexpr=in_maint hcuri=/status.jsp
-Maxnode 505
-Maxhost 1010
-Maxcontext 1100
Listen 8090
ManagerBalancerName mycluster
-ServerName localhost
+ServerName httpd-mod_proxy_cluster
- EnableMCMPReceive
-
- Require ip 127.0.0.1
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
-
- SetHandler mod_cluster-manager
- Require ip 127.0.0.1
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
+ EnableMCMPReceive
+
+ # For podman, this gets changed to IP in httpd/run.sh
+ Require host .mod_proxy_cluster_testsuite_net
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
+
+ SetHandler mod_cluster-manager
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
+
+Maxnode 505
+Maxhost 1010
+Maxcontext 1100
diff --git a/test/MODCLUSTER-734/testit.sh b/test/MODCLUSTER-734/testit.sh
index 2da40aa7a..030ea2dd2 100755
--- a/test/MODCLUSTER-734/testit.sh
+++ b/test/MODCLUSTER-734/testit.sh
@@ -9,8 +9,7 @@ httpd_remove
# build httpd + mod_proxy_cluster
rm -f nohup.out
-MPC_CONF=${MPC_CONF:-MODCLUSTER-734/mod_proxy_cluster.conf}
-MPC_NAME=MODCLUSTER-734 httpd_start
+MPC_NAME=MODCLUSTER-734 MPC_CONF=${MPC_CONF:-MODCLUSTER-734/mod_proxy_cluster.conf} httpd_start
# wait until httpd is started
httpd_wait_until_ready || exit 1
@@ -18,7 +17,7 @@ httpd_wait_until_ready || exit 1
sleep 10
# start tomcat1 and tomcat2
-tomcat_start_two
+MPC_NAME=MODCLUSTER-734 tomcat_start_two
# wait until they are in mod_proxy_cluster tables
tomcat_wait_for_n_nodes 2
@@ -29,7 +28,7 @@ docker cp MODCLUSTER-734/ROOT_OK tomcat2:/usr/local/tomcat/webapps/ROOT
# after a while the health check will get the Under maintenance status.jsp
# and mark the node not OK.
-sleep 15
+sleep 20
curl -s -m 20 http://localhost:8090/mod_cluster_manager | grep "Status: NOTOK"
if [ $? -eq 0 ]; then
diff --git a/test/MODCLUSTER-736/testit.sh b/test/MODCLUSTER-736/testit.sh
index 97bd853be..d22486690 100644
--- a/test/MODCLUSTER-736/testit.sh
+++ b/test/MODCLUSTER-736/testit.sh
@@ -5,6 +5,10 @@
httpd_remove
tomcat_all_remove
+# We must shift tomcat ports so that they do not collide with proxy
+PORT=9000
+SHUTDOWN_PORT=7005
+
MPC_NAME=MODCLUSTER-736 httpd_start
# Start a bunch ($1, or 6 if no argument is given) of tomcat
@@ -18,7 +22,7 @@ runtomcatbatch() {
for i in $(seq $t 10);
do
- tomcat_start $i
+ MPC_NAME=MODCLUSTER-736 tomcat_start $i
done
tomcat_count=$(expr 3 + 11 - $t)
@@ -75,15 +79,8 @@ runtomcatbatch() {
singlecycle() {
echo "singlecycle: Testing tomcat$1"
R=$1
- if [ "$2" = "useran" ]; then
- R=$(expr 1 + $RANDOM % 10 + 10)
- R=$(expr $R + 2)
- # TODO
- tomcat_start $1 127.0.0.$R || exit 1
- else
- R=0
- tomcat_start $1 127.0.0.$R || exit 1
- fi
+ MPC_NAME=MODCLUSTER-736 tomcat_start $1 || exit 1
+
# Wait for it to start
echo "Testing(0) tomcat$1 waiting..."
i=0
@@ -106,7 +103,8 @@ singlecycle() {
i=0
while true
do
- curl -s -m 20 http://localhost:8090/mod_cluster_manager | grep /tomcat$1 > /dev/null
+ # we have to grep the beginning slash but also a comma at the end, otherwise hostname might be matched
+ curl -s -m 20 http://localhost:8090/mod_cluster_manager | grep "/tomcat$1," > /dev/null
if [ $? -eq 0 ]; then
break
fi
@@ -123,7 +121,7 @@ singlecycle() {
tomcat_test_app $1 || exit 1
tomcat_run_ab $1 || exit 1
echo "Testing(3) tomcat$1"
- tomcat_shutdown $1 127.0.0.$R || exit 1
+ tomcat_shutdown $1 || exit 1
while true
do
curl -s -m 20 http://localhost:8090/mod_cluster_manager | grep Node | grep tomcat$1 > /dev/null
@@ -184,25 +182,20 @@ forevertomcat() {
# Start and stop successively (one after another) $1 tomcats
cyclestomcats() {
- i=1
- while true
- do
- i=$(expr $i + 1)
- if [ $i -gt $1 ]; then
- echo "Looks OK, Done!"
- break
- fi
- singlecycle $i useran || exit 1
+ for i in $(seq 1 $1); do
+ echo -n "$i/$1: "
+ singlecycle $i || exit 1
done
+ echo "Looks OK, Done!"
}
# run test for https://issues.redhat.com/browse/MODCLUSTER-736
# basically start and stop random tomcats...
runmodcluster736() {
# start 3 tomcats
- tomcat_start 2
- tomcat_start 3
- tomcat_start 4
+ MPC_NAME=MODCLUSTER-736 tomcat_start 2
+ MPC_NAME=MODCLUSTER-736 tomcat_start 3
+ MPC_NAME=MODCLUSTER-736 tomcat_start 4
tomcat_wait_for_n_nodes 3 || exit 1
# check them
tomcat_start_webapp 2 || exit 1
@@ -237,7 +230,7 @@ runmodcluster736() {
exit 1
fi
tomcat_remove 2
- tomcat_start 5
+ MPC_NAME=MODCLUSTER-736 tomcat_start 5
tomcat_wait_for_n_nodes 3
if [ $? -ne 0 ]; then
@@ -257,7 +250,7 @@ runmodcluster736() {
fi
# we have 5 3 4 in shared memory
# read 2
- tomcat_start 2
+ MPC_NAME=MODCLUSTER-736 tomcat_start 2
tomcat_wait_for_n_nodes 4
if [ $? -ne 0 ]; then
echo "tomcat_wait_for_n_nodes 4: runmodcluster736 Failed!"
diff --git a/test/MODCLUSTER-755/mod_lbmethod_cluster.conf b/test/MODCLUSTER-755/mod_lbmethod_cluster.conf
index d9003e5c5..18c81b786 100644
--- a/test/MODCLUSTER-755/mod_lbmethod_cluster.conf
+++ b/test/MODCLUSTER-755/mod_lbmethod_cluster.conf
@@ -13,23 +13,23 @@ Maxhost 1010
Maxcontext 1100
Listen 8090
ManagerBalancerName mycluster
-ServerName localhost
+ServerName httpd-mod_proxy_cluster
- EnableMCMPReceive
-
- Require ip 127.0.0.1
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
-
- SetHandler mod_cluster-manager
- Require ip 127.0.0.1
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
+ EnableMCMPReceive
+
+ # For podman, this gets changed to IP in httpd/run.sh
+ Require host .mod_proxy_cluster_testsuite_net
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
+
+ SetHandler mod_cluster-manager
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
diff --git a/test/MODCLUSTER-755/mod_proxy_cluster.conf b/test/MODCLUSTER-755/mod_proxy_cluster.conf
index 5ff429ae4..f8b637229 100644
--- a/test/MODCLUSTER-755/mod_proxy_cluster.conf
+++ b/test/MODCLUSTER-755/mod_proxy_cluster.conf
@@ -5,26 +5,27 @@ LoadModule proxy_hcheck_module modules/mod_proxy_hcheck.so
LoadModule watchdog_module modules/mod_watchdog.so
LoadModule slotmem_shm_module modules/mod_slotmem_shm.so
-Maxnode 505
-Maxhost 1010
-Maxcontext 1100
Listen 8090
ManagerBalancerName mycluster
-ServerName localhost
+ServerName httpd-mod_proxy_cluster
EnableMCMPReceive
-
- Require ip 127.0.0.1
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
-
- SetHandler mod_cluster-manager
- Require ip 127.0.0.1
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
+
+ # For podman, this gets changed to IP in httpd/run.sh
+ Require host .mod_proxy_cluster_testsuite_net
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
+
+ SetHandler mod_cluster-manager
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
+
+Maxnode 505
+Maxhost 1010
+Maxcontext 1100
diff --git a/test/MODCLUSTER-755/testit.sh b/test/MODCLUSTER-755/testit.sh
index cfc7f1e4f..9fe7c7269 100755
--- a/test/MODCLUSTER-755/testit.sh
+++ b/test/MODCLUSTER-755/testit.sh
@@ -12,11 +12,12 @@ httpd_remove
tomcat_all_remove
MPC_CONF=${MPC_CONF:-MODCLUSTER-755/mod_proxy_cluster.conf}
+
MPC_NAME=MODCLUSTER-755 httpd_start
httpd_wait_until_ready
-tomcat_start 1
+MPC_NAME=MODCLUSTER-755 tomcat_start 1
NODE_COUNT="${NODE_COUNT:-500}"
APP_COUNT="${APP_COUNT:-2}"
diff --git a/test/MODCLUSTER-785/mod_lbmethod_cluster.conf b/test/MODCLUSTER-785/mod_lbmethod_cluster.conf
index e4a1f747e..7fcc6dfcd 100644
--- a/test/MODCLUSTER-785/mod_lbmethod_cluster.conf
+++ b/test/MODCLUSTER-785/mod_lbmethod_cluster.conf
@@ -8,34 +8,35 @@ LoadModule manager_module modules/mod_manager.so
LoadModule lbmethod_cluster_module modules/mod_lbmethod_cluster.so
LoadModule watchdog_module modules/mod_watchdog.so
-Maxnode 505
-Maxhost 1010
-Maxcontext 1100
Listen 8090
ManagerBalancerName mycluster
-ServerName localhost
+ServerName httpd-mod_proxy_cluster
EnableWsTunnel
WSUpgradeHeader websocket
- EnableMCMPReceive
-
- Require ip 127.0.0.1
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
-
- SetHandler mod_cluster-manager
- Require ip 127.0.0.1
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
+ EnableMCMPReceive
+
+ # For podman, this gets changed to IP in httpd/run.sh
+ Require host .mod_proxy_cluster_testsuite_net
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
+
+ SetHandler mod_cluster-manager
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
ProxySet growth=10
ProxySet lbmethod=cluster
+
+Maxnode 505
+Maxhost 1010
+Maxcontext 1100
diff --git a/test/MODCLUSTER-785/mod_proxy_cluster.conf b/test/MODCLUSTER-785/mod_proxy_cluster.conf
index f3b961eac..1cb7da571 100644
--- a/test/MODCLUSTER-785/mod_proxy_cluster.conf
+++ b/test/MODCLUSTER-785/mod_proxy_cluster.conf
@@ -5,29 +5,30 @@ LoadModule manager_module modules/mod_manager.so
LoadModule proxy_cluster_module modules/mod_proxy_cluster.so
LoadModule watchdog_module modules/mod_watchdog.so
-Maxnode 505
-Maxhost 1010
-Maxcontext 1100
Listen 8090
ManagerBalancerName mycluster
-ServerName localhost
+ServerName httpd-mod_proxy_cluster
EnableWsTunnel
WSUpgradeHeader websocket
- EnableMCMPReceive
-
- Require ip 127.0.0.1
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
-
- SetHandler mod_cluster-manager
- Require ip 127.0.0.1
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
+ EnableMCMPReceive
+
+ # For podman, this gets changed to IP in httpd/run.sh
+ Require host .mod_proxy_cluster_testsuite_net
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
+
+ SetHandler mod_cluster-manager
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
+
+Maxnode 505
+Maxhost 1010
+Maxcontext 1100
diff --git a/test/MODCLUSTER-785/testit.sh b/test/MODCLUSTER-785/testit.sh
index 68e7751bf..f6dbf2140 100755
--- a/test/MODCLUSTER-785/testit.sh
+++ b/test/MODCLUSTER-785/testit.sh
@@ -10,12 +10,12 @@ httpd_remove
# build httpd + mod_proxy_cluster
rm -f nohup.out
-MPC_CONF=${MPC_CONF:-MODCLUSTER-785/mod_proxy_cluster.conf}
-MPC_NAME=MODCLUSTER-785 httpd_start
+PORT=9000
+MPC_NAME=MODCLUSTER-785 MPC_CONF=${MPC_CONF:-MODCLUSTER-785/mod_proxy_cluster.conf} httpd_start
# start tomcat1 on 8080
-tomcat_start 1
+MPC_NAME=MODCLUSTER-785 tomcat_start 1
# wait until tomcat1 is in mod_proxy_cluster tables
tomcat_wait_for_n_nodes 1
@@ -32,12 +32,12 @@ if [ $? -ne 0 ]; then
fi
# Stop abruptly
-tomcat_remove 1
+tomcat_kill 1
# it return 503
# make sure we use enough workers
-ab -c 10 -n100 http://localhost:8090/app/
-http_code=$(curl -s -m 20 -o /dev/null -w "%{http_code}" http://localhost:8090/app/)
+ab -c 10 -n 100 http://localhost:8090/app/status.jsp
+http_code=$(curl -s -m 20 -o /dev/null -w "%{http_code}" http://localhost:8090/app/status.jsp)
if [ ${http_code} != 503 ]; then
echo "MODCLUSTER-785 Failed! not 503 but ${http_code}"
exit 1
@@ -46,7 +46,8 @@ fi
sleep 15
# start tomcat1 on 8080
-tomcat_start 1
+tomcat_remove 1
+MPC_NAME=MODCLUSTER-785 tomcat_start 1
# wait until tomcat1 is in mod_proxy_cluster tables
tomcat_wait_for_n_nodes 1
@@ -63,7 +64,7 @@ while true
do
sleep 1
http_code=$(curl -s -m 20 -o /dev/null -w "%{http_code}" http://localhost:8090/app/status.jsp)
- if [ ${http_code} == 200 ]; then
+ if [ ${http_code} = 200 ]; then
break
fi
i=$(expr $i + 1)
@@ -72,7 +73,7 @@ do
fi
echo "*${http_code}*"
done
-if [ ${http_code} == 503 ]; then
+if [ ${http_code} = 503 ]; then
echo "MODCLUSTER-785 Failed! return 503"
exit 1
fi
@@ -87,7 +88,7 @@ while true
do
sleep 1
http_code=$(curl -s -m 20 -o /dev/null -w "%{http_code}" http://localhost:8090/app/status.jsp)
- if [ ${http_code} == 503 ]; then
+ if [ ${http_code} = 503 ]; then
echo "MODCLUSTER-785 Failed! return 503"
exit 1
fi
diff --git a/test/MODCLUSTER-794/mod_lbmethod_cluster.conf b/test/MODCLUSTER-794/mod_lbmethod_cluster.conf
index 58923b2a7..33f56c9b7 100644
--- a/test/MODCLUSTER-794/mod_lbmethod_cluster.conf
+++ b/test/MODCLUSTER-794/mod_lbmethod_cluster.conf
@@ -11,26 +11,26 @@ LoadModule watchdog_module modules/mod_watchdog.so
ProxyPreserveHost On
Listen 8090
-ServerName localhost
+ServerName httpd-mod_proxy_cluster
ManagerBalancerName mycluster
EnableWsTunnel
WSUpgradeHeader websocket
- EnableMCMPReceive
-
- Require ip 127.0.0.
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
-
- SetHandler mod_cluster-manager
- Require ip 127.0.0.
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
+ EnableMCMPReceive
+
+ # For podman, this gets changed to IP in httpd/run.sh
+ Require host .mod_proxy_cluster_testsuite_net
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
+
+ SetHandler mod_cluster-manager
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
diff --git a/test/MODCLUSTER-794/mod_proxy_cluster.conf b/test/MODCLUSTER-794/mod_proxy_cluster.conf
index 46d596a44..5738a671e 100644
--- a/test/MODCLUSTER-794/mod_proxy_cluster.conf
+++ b/test/MODCLUSTER-794/mod_proxy_cluster.conf
@@ -6,46 +6,29 @@ LoadModule slotmem_shm_module modules/mod_slotmem_shm.so
LoadModule manager_module modules/mod_manager.so
LoadModule proxy_cluster_module modules/mod_proxy_cluster.so
-
- UseAlias On
-
-
-ProxyPreserveHost On
-
Listen 8090
-ServerName localhost
+ServerName httpd-mod_proxy_cluster
ManagerBalancerName mycluster
-EnableWsTunnel
-WSUpgradeHeader websocket
-
- CreateBalancers 0
- EnableOptions On
-
+CreateBalancers 0
+EnableOptions On
- EnableMCMPReceive
-
- Require ip 127.0.0.
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
-
- SetHandler mod_cluster-manager
- Require ip 127.0.0.
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
+ EnableMCMPReceive
+
+ # For podman, this gets changed to IP in httpd/run.sh
+ Require host .mod_proxy_cluster_testsuite_net
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
+
+ SetHandler mod_cluster-manager
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
-
-
-
- ProxySet growth=10
- ProxySet lbmethod=cluster
-
-
# This is the default value, but let's go with the explicit here
Maxnode 20
diff --git a/test/MODCLUSTER-794/testit.sh b/test/MODCLUSTER-794/testit.sh
index 5d15ebba0..38d193662 100644
--- a/test/MODCLUSTER-794/testit.sh
+++ b/test/MODCLUSTER-794/testit.sh
@@ -6,11 +6,13 @@
tomcat_all_remove
httpd_remove
-MPC_CONF=${MPC_CONF:-MODCLUSTER-794/mod_proxy_cluster.conf}
-MPC_NAME=MODCLUSTER-794 httpd_start
+# due to conflict with proxy's 8090 port we shift tomcats
+PORT=9000
+MPC_NAME=MODCLUSTER-794 MPC_CONF=${MPC_CONF:-MODCLUSTER-794/mod_proxy_cluster.conf} httpd_start
-for i in {1..20}; do
- tomcat_start $i
+
+for i in $(seq 1 20); do
+ MPC_NAME=MODCLUSTER-794 tomcat_start $i
done
sleep 20
@@ -26,5 +28,7 @@ for i in $(seq 1 10); do
sleep 5
done
+tomcat_wait_for_n_nodes 20
+
tomcat_all_remove
diff --git a/test/hangingtests.sh b/test/hangingtests.sh
index 05435af0f..7bc2633e7 100644
--- a/test/hangingtests.sh
+++ b/test/hangingtests.sh
@@ -5,22 +5,29 @@
httpd_remove
tomcat_all_remove
+tomcat_jdbsuspend_prepare() {
+ # Create files we need
+ docker exec -i tomcat$1 sh -c 'echo cont > continue.txt; echo exit >> continue.txt'
+ docker exec -i tomcat$1 sh -c 'echo suspend all > hang.txt; echo exit >> hang.txt'
+ docker exec -i tomcat$1 sh -c 'jdb -attach 6660 < continue.txt'
+}
# This should suspend the tomcat for ~ 1000 seconds ~ causing it gets removed afterwhile.
-jdbsuspend() {
- rm -f /tmp/testpipein
- mkfifo /tmp/testpipein
- rm -f /tmp/testpipeout
- mkfifo /tmp/testpipeout
- sleep 1000 > /tmp/testpipein &
- jdb -attach 6660 < /tmp/testpipein > /tmp/testpipeout &
- echo "suspend" > /tmp/testpipein
- cat < /tmp/testpipeout &
+tomcat_jdbsuspend_start() {
+ # suspend
+ docker exec -i tomcat$1 sh -c 'rm -f /tmp/testpipein'
+ docker exec -i tomcat$1 sh -c 'mkfifo /tmp/testpipein'
+ docker exec -i tomcat$1 sh -c 'rm -f /tmp/testpipeout'
+ docker exec -i tomcat$1 sh -c 'mkfifo /tmp/testpipeout'
+ docker exec -i tomcat$1 sh -c 'sleep 1000 > /tmp/testpipein &'
+ docker exec -i tomcat$1 sh -c 'jdb -attach 6660 < /tmp/testpipein > /tmp/testpipeout &'
+ docker exec -i tomcat$1 sh -c 'echo "suspend" > /tmp/testpipein'
+ docker exec -i tomcat$1 sh -c 'cat < /tmp/testpipeout &'
}
-jdbexit() {
- cat > /tmp/testpipeout &
- echo "exit" > /tmp/testpipein
+tomcat_jdbsuspend_exit() {
+ docker exec -i tomcat$1 sh -c 'cat > /tmp/testpipeout &'
+ docker exec -i tomcat$1 sh -c 'echo "exit" > /tmp/testpipein'
}
####################################
@@ -28,16 +35,6 @@ jdbexit() {
####################################
httpd_start
-# Create files we need
-cat << EOF > continue.txt
-cont
-exit
-EOF
-cat << EOF > hang.txt
-suspend all
-exit
-EOF
-
# Check that hanging tomcat will be removed
echo "hanging a tomcat checking it is removed after a while no requests"
tomcat_start_two
@@ -46,24 +43,23 @@ tomcat_wait_for_n_nodes 2 || exit 1
# curlloop.sh checks for http://localhost:8090/testapp/test.jsp
docker cp testapp tomcat1:/usr/local/tomcat/webapps
docker cp testapp tomcat2:/usr/local/tomcat/webapps
-
docker cp setenv.sh tomcat1:/usr/local/tomcat/bin
+
docker commit tomcat1 ${IMG}-debug
tomcat_remove 1
tomcat_wait_for_n_nodes 1
-docker container rm tomcat${PORT}
# Start the node.
IMG=${IMG}-debug tomcat_start 1
sleep 10
-docker exec tomcat1 jdb -attach 6660 < continue.txt
+tomcat_jdbsuspend_prepare 1
tomcat_wait_for_n_nodes 2 || exit 1
echo "2 tomcat started"
# Hang the node,
# jdb and a pipe to hang the tomcat.
-jdbsuspend
+tomcat_jdbsuspend_start 1
tomcat_wait_for_n_nodes 1 || exit 1
echo "1 tomcat hanging and gone"
-jdbexit
+tomcat_jdbsuspend_exit 1
# The tomcat is comming up again
tomcat_wait_for_n_nodes 2 || exit 1
echo "the tomcat is back"
@@ -71,7 +67,7 @@ echo "the tomcat is back"
# Same test with requests, make them in a loop
echo "hanging tomcat removed after a while with requests"
sh curlloop.sh 200 000 &
-jdbsuspend
+tomcat_jdbsuspend_start 1
tomcat_wait_for_n_nodes 1 || exit 1
ps -ef | grep curlloop | grep -v grep
if [ $? -ne 0 ]; then
@@ -79,26 +75,27 @@ if [ $? -ne 0 ]; then
exit 1
fi
ps -ef | grep curlloop | grep -v grep | awk ' { print $2 } ' | xargs kill
-jdbexit
+tomcat_jdbsuspend_exit 1
# The tomcat is comming up again
tomcat_wait_for_n_nodes 2 || exit 1
# Same test with requets but stop the other tomcat
echo "single hanging tomcat removed after a while with requests"
-tomcat_remove 2
+# kill because we want to keep it in the DNS records
+tomcat_kill 2
tomcat_wait_for_n_nodes 1 || exit 1
-jdbsuspend
+tomcat_jdbsuspend_start 1
sleep 10
sh curlloop.sh 000 404 503 &
-tomcat_wait_for_n_nodes 0 || exit 1
+tomcat_wait_for_n_nodes 0 || exit 1
ps -ef | grep curlloop | grep -v grep
if [ $? -ne 0 ]; then
echo "curlloop.sh FAILED!"
exit 1
fi
ps -ef | grep curlloop | grep -v grep | awk ' { print $2 } ' | xargs kill
-jdbexit
-# The tomcat is comming up again
+tomcat_jdbsuspend_exit 1
+# The tomcat is coming up again
tomcat_wait_for_n_nodes 1 || exit 1
# Cleanup at the end
diff --git a/test/httpd/mod_lbmethod_cluster.conf b/test/httpd/mod_lbmethod_cluster.conf
index ac03b5b45..8dcc2803d 100644
--- a/test/httpd/mod_lbmethod_cluster.conf
+++ b/test/httpd/mod_lbmethod_cluster.conf
@@ -11,28 +11,28 @@ LoadModule watchdog_module modules/mod_watchdog.so
ProxyPreserveHost On
Listen 8090
-ServerName localhost
+ServerName httpd-mod_proxy_cluster
ManagerBalancerName mycluster
WSUpgradeHeader websocket
- EnableMCMPReceive
-
- Require ip 127.0.0.
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
-
- SetHandler mod_cluster-manager
- Require ip 127.0.0.
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
+ EnableMCMPReceive
+
+ # For podman, this gets changed to IP in httpd/run.sh
+ Require host .mod_proxy_cluster_testsuite_net
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
+
+ SetHandler mod_cluster-manager
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
- ProxySet growth=20
- ProxySet lbmethod=cluster
+ ProxySet growth=20
+ ProxySet lbmethod=cluster
diff --git a/test/httpd/mod_proxy_cluster.conf b/test/httpd/mod_proxy_cluster.conf
index e6e26d8ad..acb3d3883 100644
--- a/test/httpd/mod_proxy_cluster.conf
+++ b/test/httpd/mod_proxy_cluster.conf
@@ -6,42 +6,26 @@ LoadModule slotmem_shm_module modules/mod_slotmem_shm.so
LoadModule manager_module modules/mod_manager.so
LoadModule proxy_cluster_module modules/mod_proxy_cluster.so
-
- UseAlias On
-
-
ProxyPreserveHost On
Listen 8090
-ServerName localhost
ManagerBalancerName mycluster
WSUpgradeHeader websocket
-
- CreateBalancers 0
- EnableOptions On
-
-
- EnableMCMPReceive
-
- Require ip 127.0.0.
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
-
- SetHandler mod_cluster-manager
- Require ip 127.0.0.
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
+ ServerName httpd-mod_proxy_cluster
+ EnableMCMPReceive
+
+ # For podman, this gets changed to IP in httpd/run.sh
+ Require host .mod_proxy_cluster_testsuite_net
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
+
+ SetHandler mod_cluster-manager
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
-
-
-
- ProxySet growth=10
- ProxySet lbmethod=cluster
-
-
diff --git a/test/httpd/run.sh b/test/httpd/run.sh
index 42e51aa5f..0f92a4d7a 100755
--- a/test/httpd/run.sh
+++ b/test/httpd/run.sh
@@ -2,8 +2,8 @@
# copy the prepared conf file and include it
cd /test/
+FILECONF=$(basename $CONF)
if [ -f $CONF ]; then
- FILECONF=$(filename $CONF)
cp $CONF /usr/local/apache2/conf/
echo "Include conf/$FILECONF" >> /usr/local/apache2/conf/httpd.conf
else
@@ -11,8 +11,20 @@ else
exit 1
fi
+# podman networking can't do rDNS, so REMOTE_HOST is an IP address instead of
+# a hostname, which means the `Require host ...` directives do not work.
+# Because of that, we'll add a `Require ip ...` upon podman detection
+if grep -qc 'search dns.podman' /etc/resolv.conf; then
+ PODMAN_SUBNET=$(sed -rn 's/nameserver ([0-9]+)\.([0-9]+)\.([0-9]+).*/\1\.\2\.\3\./p' /etc/resolv.conf)
+ echo "Podman detected: Changing Require host to Require ip $PODMAN_SUBNET"
+ sed -i "s/Require host \.mod_proxy_cluster_testsuite_net/Require ip ${PODMAN_SUBNET}/" /usr/local/apache2/conf/$FILECONF
+fi
+
+if [ ! -z "$MPC_NAME" ]; then
+ sed -i "s/ServerName httpd-mod_proxy_cluster/ServerName ${MPC_NAME}/g" /usr/local/apache2/conf/$FILECONF
+fi
+
# start apache httpd server in foreground
echo "Starting httpd..."
/usr/local/apache2/bin/apachectl start
tail -f /usr/local/apache2/logs/error_log
-
diff --git a/test/includes/common.sh b/test/includes/common.sh
index f87b9af65..08b2fbf74 100644
--- a/test/includes/common.sh
+++ b/test/includes/common.sh
@@ -67,7 +67,9 @@ httpd_start() {
echo " NAME: ${MPC_NAME:-httpd-mod_proxy_cluster}"
echo "You can config those with envars MPC_SOURCES, MPC_BRANCH, MPC_CONF, MPC_NAME respectively"
fi
- docker run -d --network=host --ulimit nofile=65536:65536 --name ${MPC_NAME:-httpd-mod_proxy_cluster} \
+ docker run -d --network=mod_proxy_cluster_testsuite_net -p 8090:8090 \
+ --ulimit nofile=65536:65536 --name ${MPC_NAME:-httpd-mod_proxy_cluster} \
+ -e MPC_NAME=${MPC_NAME:-httpd-mod_proxy_cluster} \
-e CONF=${MPC_CONF:-httpd/mod_proxy_cluster.conf} \
$HTTPD_IMG
@@ -110,51 +112,48 @@ clean_and_exit() {
# By passing arguments you can change
# $1 tomcat version (default is 10.1, see tomcat/Dockerfile)
# $2 tomcat config file (default is server.xml)
-# $3 tomcat context file (default is context.xml)
tomcat_create() {
if [ -z "$1" ]; then
docker build -t $IMG -f tomcat/Containerfile tomcat/ \
- --build-arg TESTSUITE_TOMCAT_CONFIG=${2:-server.xml} \
- --build-arg TESTSUITE_TOMCAT_CONTEXT=${3:-context.xml}
+ --build-arg TESTSUITE_TOMCAT_CONFIG=server.xml
else
docker build -t $IMG -f tomcat/Containerfile tomcat/ \
--build-arg TESTSUITE_TOMCAT_VERSION=$1 \
- --build-arg TESTSUITE_TOMCAT_CONFIG=${2:-server.xml} \
- --build-arg TESTSUITE_TOMCAT_CONTEXT=${3:-context.xml}
+ --build-arg TESTSUITE_TOMCAT_CONFIG=${2:-server.xml}
fi
}
-# Start tomcat$1 container on $2 or 127.0.0.$1 if $2 is not given.
-# Ports are set by default as follows
-# * tomcat port 8080 + $1 - 1
-# * tomcat ajp port 8900 + $1 - 1
-# * tomcat shutdown port 8005 + $1 - 1
-# $1 has to be in range [1, 75].
-# Proxy's IP can be specified by $3 (default: 127.0.0.1) and its
-# port with $4 (default: 8090).
+# Start tomcat$1 container
+#
+# You can change the defaults by using following variables:
+# * PORT (default 8080)
+# * SHUTDOWN_PORT (default 8005)
+# * AJP_PORT (default 8900)
+# * PROXY_PORT (default 8090)
+# * OFFSET (applied to all ports, default $1 - 1)
+# * PROXY_NAME (default $MPC_NAME)
+# By default, only the shutdown port is exposed
tomcat_start() {
if [ -z "$1" ]; then
echo "tomcat_start called without arguments"
exit 1
fi
- if [ $1 -le 0 ] || [ $1 -gt 75 ]; then
- echo "tomcat_start called with invalid \$1 value (got $1, allowed [1, 75])"
- exit 2
- fi
- ADDR="127.0.0.$1"
- if [ ! -z "$2" ]; then
- ADDR="$2"
- fi
+ local DEFAULT_OFFSET=$(expr $1 - 1)
+ local shutport=$(expr ${SHUTDOWN_PORT:-8005} + $DEFAULT_OFFSET)
- local OFFSET=$(expr $1 - 1)
- echo "Starting tomcat$1 on $ADDR:$(expr 8080 + $OFFSET)"
- nohup docker run --network=host -e tomcat_address=$ADDR \
- -e tomcat_port_offset=$OFFSET \
+ echo "Starting tomcat$1"
+ nohup docker run --network=mod_proxy_cluster_testsuite_net \
+ -p $shutport:$shutport \
+ -e tomcat_address=tomcat$1 \
+ -e tomcat_port_offset=${OFFSET:-$DEFAULT_OFFSET} \
-e jvm_route=tomcat$1 \
- -e cluster_address=${3:-127.0.0.1} \
- -e cluster_port=${4:-8090} \
- --name tomcat$1 ${IMG} &
+ -e proxy_address=${PROXY_NAME:-$MPC_NAME} \
+ -e tomcat_port=${PORT:-8080} \
+ -e tomcat_shutdown_port=${SHUTDOWN_PORT:-8005} \
+ -e tomcat_ajp_port=${AJP_PORT:-8900} \
+ -e proxy_port=${PROXY_PORT:-8090} \
+ --name tomcat$1 ${IMG} &
ps -q $! > /dev/null
if [ $? -ne 0 ]; then
echo "docker run for tomcat$1 failed"
@@ -255,22 +254,36 @@ tomcat_start_webapp() {
# $1 tomcat number
# $2 the last segment of IPv4 addr ($1 by default)
tomcat_shutdown() {
- ADDR="127.0.0.$1"
- if [ ! -z "$2" ]; then
- ADDR=$2
+ if [ -z "$1" ]; then
+ echo "An argument is required"
+ exit 1
fi
- echo "shutting down tomcat$1 with address: $ADDR"
- echo "SHUTDOWN" | nc $ADDR $(expr 8005 + $1 - 1)
+ echo "shutting down tomcat$1"
+ echo "SHUTDOWN" | nc localhost $(expr ${SHUTDOWN_PORT:-8005} + $1 - 1)
}
# Remove the docker image tomcat$1
# Note: To succesfully remove an image it needs to be stopped
tomcat_remove() {
+ if [ -z "$1" ]; then
+ echo "An argument is required"
+ exit 1
+ fi
docker stop tomcat$1 > /dev/null 2>&1
docker rm tomcat$1
}
+# Kills the tomcat process in the given tomcat container, but does not
+# remove the container itself (it will still exist incl. docker DNS records)
+tomcat_kill() {
+ if [ -z "$1" ]; then
+ echo "An argument is required"
+ exit 1
+ fi
+ docker exec tomcat$1 pkill -9 java
+}
+
#
# Run a load test for the given tomcat$1 using ab
tomcat_run_ab() {
diff --git a/test/maintests.sh b/test/maintests.sh
index 26d7ab622..2ffa24968 100644
--- a/test/maintests.sh
+++ b/test/maintests.sh
@@ -30,7 +30,7 @@ sleep 12
# Sticky (yes, there is only one app!!!)
echotestlabel "sticky one app"
SESSIONCO=$(curl -v http://localhost:8090/testapp/test.jsp -m 20 -o /dev/null 2>&1 | grep Set-Cookie | awk '{ print $3 } ' | sed 's:;::')
-if [ "${SESSIONCO}" == "" ];then
+if [ "${SESSIONCO}" = "" ];then
echo "Failed no sessionid in curl output..."
curl -v http://localhost:8090/testapp/test.jsp
fi
@@ -55,7 +55,7 @@ NEWNODE=$(echo ${NEWCO} | awk -F = '{ print $2 }' | awk -F . '{ print $2 }')
echo "second: ${NEWCO} node: ${NEWNODE}"
echo "Checking we can reach the 2 nodes"
i=0
-while [ "${NODE}" == "${NEWNODE}" ]
+while [ "${NODE}" = "${NEWNODE}" ]
do
NEWCO=$(curl -v http://localhost:8090/testapp/test.jsp -m 20 -o /dev/null 2>&1 | grep Set-Cookie | awk '{ print $3 } ' | sed 's:;::')
NEWNODE=$(echo ${NEWCO} | awk -F = '{ print $2 }' | awk -F . '{ print $2 }')
@@ -64,7 +64,7 @@ do
echo "Can't find the 2 webapps"
exit 1
fi
- if [ "${NEWNODE}" == "" ]; then
+ if [ "${NEWNODE}" = "" ]; then
echo "Can't find node in request"
exit 1
fi
@@ -89,11 +89,10 @@ echotestlabel "sticky: stopping one node and doing requests..."
NODE=$(echo ${NEWCO} | awk -F = '{ print $2 }' | awk -F . '{ print $2 }')
echo $NODE
PORT=$(curl http://localhost:8090/mod_cluster_manager -m 20 | grep Node | grep $NODE | sed 's:)::' | awk -F : '{ print $3 } ')
-NAME=$(expr ${PORT} - 8080 + 1)
-echo "Will stop tomcat$NAME corresponding to ${NODE} and cookie: ${NEWCO}"
+NUMBER=$(expr ${PORT} - 8080 + 1)
CODE="200"
i=0
-while [ "$CODE" == "200" ]
+while [ "$CODE" = "200" ]
do
if [ $i -gt 100 ]; then
echo "Done remaining tomcat still answering!"
@@ -102,8 +101,9 @@ do
CODE=$(curl -s -o /dev/null -m 20 -w "%{http_code}" --cookie "${NEWCO}" http://localhost:8090/testapp/test.jsp)
if [ $i -eq 0 ]; then
# stop the tomcat
- echo "tomcat${NAME} being stopped"
- tomcat_remove $NAME
+ echo "tomcat${NUMBER} being stopped"
+        # We kill instead of removing: this stops only the tomcat process (keeps the container in DNS)
+ tomcat_kill $NUMBER
fi
i=$(expr $i + 1)
done
@@ -114,7 +114,8 @@ if [ ${CODE} != "200" ]; then
fi
# Restart the tomcat
-tomcat_start ${NAME}
+tomcat_remove ${NUMBER}
+tomcat_start ${NUMBER}
tomcat_wait_for_n_nodes 2
# Test a keepalived connection finds the 2 webapps on each tomcat
@@ -128,50 +129,6 @@ if [ $? -ne 0 ]; then
exit 1
fi
-#
-# Test virtual host
-echotestlabel "Testing virtual hosts"
-docker cp tomcat2:/usr/local/tomcat/conf/server.xml .
-sed '/Host name=.*/i ' server.xml > new.xml
-docker cp new.xml tomcat2:/usr/local/tomcat/conf/server.xml
-docker cp examples tomcat2:/usr/local/tomcat
-docker commit tomcat2 ${IMG}-temporary
-tomcat_remove 2
-tomcat_wait_for_n_nodes 1
-# Start the node.
-IMG=${IMG}-temporary tomcat_start 2 &
-tomcat_wait_for_n_nodes 2 || exit 1
-# Basically curl --header "Host: example.com" http://127.0.0.1:8090/test/test.jsp gives 200
-# in fact the headers are:
-# X-Forwarded-For: 127.0.0.1
-# X-Forwarded-Host: example.com
-# X-Forwarded-Server: fe80::faf4:935b:9dda:2adf
-# therefore don't forget ProxyPreserveHost On (otherwise UseAlias On failed...)
-#
-CODE=$(curl -s -o /dev/null -m 20 -w "%{http_code}" --header "Host: example.com" http://127.0.0.1:8090/test/test.jsp)
-if [ ${CODE} != "200" ]; then
- echo "Failed can't rearch webapp at example.com: ${CODE}"
- exit 1
-fi
-# Basically curl --header "Host: localhost" http://127.0.0.1:8090/test/test.jsp gives 400
-CODE=$(curl -s -o /dev/null -m 20 -w "%{http_code}" --header "Host: localhost" http://127.0.0.1:8090/test/test.jsp)
-if [ ${CODE} != "404" ]; then
- echo "Failed should NOT rearch webapp at localhost: ${CODE}"
- exit 1
-fi
-# Same using localhost/testapp2 and curl --header "Host: localhost" http://127.0.0.1:8090/testapp2/test.jsp
-CODE=$(curl -s -o /dev/null -m 20 -w "%{http_code}" --header "Host: localhost" http://127.0.0.1:8090/testapp2/test.jsp)
-if [ ${CODE} != "200" ]; then
- echo "Failed can't rearch webapp at localhost: ${CODE}"
- exit 1
-fi
-# Basically curl --header "Host: example.com" http://127.0.0.1:8090/testapp2/test.jsp gives 400
-CODE=$(curl -s -o /dev/null -m 20 -w "%{http_code}" --header "Host: example.com" http://127.0.0.1:8090/testapp2/test.jsp)
-if [ ${CODE} != "404" ]; then
- echo "Failed should NOT rearch webapp at localhost: ${CODE}"
- exit 1
-fi
-
# Shutdown the 2 tomcats
tomcat_remove 1
tomcat_remove 2
diff --git a/test/setup-dependencies.sh b/test/setup-dependencies.sh
index ed4e7b37e..5327eb921 100644
--- a/test/setup-dependencies.sh
+++ b/test/setup-dependencies.sh
@@ -1,5 +1,8 @@
#!/usr/bin/sh
+# create a docker network if it does not exist
+docker network create mod_proxy_cluster_testsuite_net > /dev/null 2>&1 || true
+
# $1 is the command, $2 is the flag used for the check (--version by default)
check_cmd() {
if [ -z "$1" ]; then
diff --git a/test/testsuite.sh b/test/testsuite.sh
index de307743b..3845adc8a 100644
--- a/test/testsuite.sh
+++ b/test/testsuite.sh
@@ -76,6 +76,8 @@ run_test maintests.sh "Main tests"
res=$(expr $res + $?)
run_test websocket/basic.sh "Websocket tests"
res=$(expr $res + $?)
+run_test usealias/testit.sh "UseAlias"
+res=$(expr $res + $?)
run_test MODCLUSTER-640/testit.sh "MODCLUSTER-640"
res=$(expr $res + $?)
run_test MODCLUSTER-734/testit.sh "MODCLUSTER-734"
@@ -91,6 +93,8 @@ res=$(expr $res + $?)
MPC_CONF=httpd/mod_lbmethod_cluster.conf run_test basetests.sh "Basic tests with mod_proxy_balancer"
res=$(expr $res + $?)
+MPC_CONF=usealias/mod_lbmethod_cluster.conf run_test usealias/testit.sh "UseAlias with mod_proxy_balancer"
+res=$(expr $res + $?)
MPC_CONF=MODCLUSTER-640/mod_lbmethod_cluster.conf run_test MODCLUSTER-640/testit.sh "MODCLUSTER-640 with mod_proxy_balancer"
res=$(expr $res + $?)
MPC_CONF=MODCLUSTER-734/mod_lbmethod_cluster.conf run_test MODCLUSTER-734/testit.sh "MODCLUSTER-734 with mod_proxy_balancer"
diff --git a/test/tomcat/Containerfile b/test/tomcat/Containerfile
index d8f784ad5..837b0cd39 100644
--- a/test/tomcat/Containerfile
+++ b/test/tomcat/Containerfile
@@ -13,15 +13,16 @@ COPY target/mod_cluster-distribution-*-tomcat-${TESTSUITE_TOMCAT_VERSION}.zip /t
RUN unzip /tmp/mod_cluster-distribution-*-tomcat-${TESTSUITE_TOMCAT_VERSION}.zip && \
cp mod_cluster-distribution-*/lib/*.jar /usr/local/tomcat/lib/
-COPY $TESTSUITE_TOMCAT_CONFIG ./conf/server.xml
-COPY $TESTSUITE_TOMCAT_CONTEXT ./conf/context.xml
+COPY $TESTSUITE_TOMCAT_CONFIG ./conf/server.xml
COPY --chmod=755 start.sh ./
-ENV cluster_port=8090
-ENV cluster_address=127.0.0.1
+ENV proxy_port=8090
+ENV proxy_address=httpd-mod_proxy_cluster
ENV jvm_route=
-ENV tomcat_address=
+ENV tomcat_address=localhost
+ENV tomcat_port=8080
+ENV tomcat_shutdown_port=8005
+ENV tomcat_ajp_port=8900
ENV tomcat_port_offset=0
CMD ["./start.sh"]
-
diff --git a/test/tomcat/context.xml b/test/tomcat/context.xml
deleted file mode 100644
index ae7987847..000000000
--- a/test/tomcat/context.xml
+++ /dev/null
@@ -1,16 +0,0 @@
-
-
-
-
- WEB-INF/web.xml
- WEB-INF/tomcat-web.xml
- ${catalina.base}/conf/web.xml
-
-
-
-
-
-
diff --git a/test/tomcat/server.xml b/test/tomcat/server.xml
index 0ea050ab0..57fbda5e8 100644
--- a/test/tomcat/server.xml
+++ b/test/tomcat/server.xml
@@ -19,14 +19,12 @@
define subcomponents such as "Valves" at this level.
Documentation at /docs/config/server.html
-->
-
+
-
+
@@ -55,75 +53,18 @@
-->
-
-
-
-
-
-
-
-
-
-
-
@@ -156,15 +97,9 @@
resourceName="UserDatabase"/>
-
-
-
-
diff --git a/test/tomcat/start.sh b/test/tomcat/start.sh
index e7f0d5cae..f6c9fda58 100644
--- a/test/tomcat/start.sh
+++ b/test/tomcat/start.sh
@@ -1,21 +1,23 @@
#!/bin/bash
-sed -i "s/tomcat_address/${tomcat_address:-127.0.0.1}/g" ./conf/server.xml
-sed -i "s/port_offset/${tomcat_port_offset:-0}/g" ./conf/server.xml
-sed -i "s/proxy_port/${cluster_port:-8090}/g" ./conf/server.xml
-sed -i "s/proxy_address/${cluster_address:-127.0.0.1}/g" ./conf/server.xml
-sed -i "s/proxy_address/${cluster_address:-127.0.0.1}/g" ./conf/context.xml
-
if [ ! -z ${jvm_route} ]; then
- sed -i "/" ./conf/server.xml
+ sed -i "/" ./conf/server.xml
fi
+sed -i "s/tomcat_address/${tomcat_address}/g" ./conf/server.xml
+sed -i "s/tomcat_port/${tomcat_port}/g" ./conf/server.xml
+sed -i "s/tomcat_shutdown_port/${tomcat_shutdown_port}/g" ./conf/server.xml
+sed -i "s/tomcat_ajp_port/${tomcat_ajp_port}/g" ./conf/server.xml
+sed -i "s/port_offset/${tomcat_port_offset}/g" ./conf/server.xml
+sed -i "s/proxy_port/${proxy_port}/g" ./conf/server.xml
+sed -i "s/proxy_address/${proxy_address}/g" ./conf/server.xml
+
ls lib/jakartaee-migration-*.jar
if [ $? = 0 ]; then
- rm lib/mod_cluster-container-tomcat-9.0-*.Final-SNAPSHOT.jar
mkdir webapps-javaee
-else
- rm lib/mod_cluster-container-tomcat-10.1-*.Final-SNAPSHOT.jar
fi
+# run tomcat (catalina.sh run stays in the foreground until the process exits or is killed)
bin/catalina.sh run
+# just stay around even when the tomcat process gets killed
+while true; do cat /dev/null; done;
diff --git a/test/examples/test/test.jsp b/test/usealias/examples/test/test.jsp
similarity index 100%
rename from test/examples/test/test.jsp
rename to test/usealias/examples/test/test.jsp
diff --git a/test/usealias/mod_lbmethod_cluster.conf b/test/usealias/mod_lbmethod_cluster.conf
new file mode 100644
index 000000000..e575fd4ec
--- /dev/null
+++ b/test/usealias/mod_lbmethod_cluster.conf
@@ -0,0 +1,39 @@
+LoadModule proxy_module modules/mod_proxy.so
+LoadModule proxy_http_module modules/mod_proxy_http.so
+LoadModule proxy_balancer_module modules/mod_proxy_balancer.so
+LoadModule proxy_hcheck_module modules/mod_proxy_hcheck.so
+
+LoadModule slotmem_shm_module modules/mod_slotmem_shm.so
+LoadModule manager_module modules/mod_manager.so
+LoadModule lbmethod_cluster_module modules/mod_lbmethod_cluster.so
+LoadModule watchdog_module modules/mod_watchdog.so
+
+UseAlias On
+ProxyPreserveHost On
+
+Listen 8090
+ServerName httpd-mod_proxy_cluster
+ManagerBalancerName mycluster
+WSUpgradeHeader websocket
+
+
+ EnableMCMPReceive
+
+ # For podman, this gets changed to IP in httpd/run.sh
+ Require host .mod_proxy_cluster_testsuite_net
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
+
+ SetHandler mod_cluster-manager
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
+
+
+
+ ProxySet growth=20
+ ProxySet lbmethod=cluster
+
diff --git a/test/usealias/mod_proxy_cluster.conf b/test/usealias/mod_proxy_cluster.conf
new file mode 100644
index 000000000..3c8110bff
--- /dev/null
+++ b/test/usealias/mod_proxy_cluster.conf
@@ -0,0 +1,33 @@
+LoadModule watchdog_module modules/mod_watchdog.so
+LoadModule proxy_module modules/mod_proxy.so
+LoadModule proxy_http_module modules/mod_proxy_http.so
+LoadModule proxy_hcheck_module modules/mod_proxy_hcheck.so
+LoadModule slotmem_shm_module modules/mod_slotmem_shm.so
+LoadModule manager_module modules/mod_manager.so
+LoadModule proxy_cluster_module modules/mod_proxy_cluster.so
+
+UseAlias On
+ProxyPreserveHost On
+
+
+Listen 8090
+ManagerBalancerName mycluster
+WSUpgradeHeader websocket
+
+
+ ServerName httpd-mod_proxy_cluster
+ EnableMCMPReceive
+
+ # For podman, this gets changed to IP in httpd/run.sh
+ Require host .mod_proxy_cluster_testsuite_net
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
+
+ SetHandler mod_cluster-manager
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
+
diff --git a/test/usealias/server.xml b/test/usealias/server.xml
new file mode 100644
index 000000000..51fdb26b6
--- /dev/null
+++ b/test/usealias/server.xml
@@ -0,0 +1,118 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ localhost
+
+
+
+
+
+
+
+
+
diff --git a/test/usealias/testit.sh b/test/usealias/testit.sh
new file mode 100644
index 000000000..d08f91cdf
--- /dev/null
+++ b/test/usealias/testit.sh
@@ -0,0 +1,88 @@
+#!/usr/bin/sh
+
+. includes/common.sh
+
+# remove possibly running containers
+httpd_remove
+tomcat_all_remove
+
+# Make sure to use custom conf with UseAlias On
+MPC_CONF=usealias/mod_proxy_cluster.conf httpd_start
+
+
+tomcat_start 1
+
+tomcat_wait_for_n_nodes 1
+
+# Test virtual host
+echo "Create a temporary image with a custom server.xml"
+
+docker cp usealias/server.xml tomcat1:/usr/local/tomcat/conf/server.xml
+# This app will have alias = example.com
+docker cp usealias/examples tomcat1:/usr/local/tomcat
+# This app will have two aliases: localhost, tomcat1
+docker cp usealias/examples/test tomcat1:/usr/local/tomcat/webapps/webapp
+docker commit tomcat1 ${IMG}-temporary
+# TODO: Without shutdown only the example.com VHost is available, looks fishy
+tomcat_shutdown 1
+
+tomcat_wait_for_n_nodes 0
+tomcat_remove 1
+
+# Start the node.
+echo "Start custom tomcat image as tomcat1"
+IMG=${IMG}-temporary tomcat_start 1
+
+tomcat_wait_for_n_nodes 1
+sleep 20
+
+# Let's test now that
+# curl --header "Host: example.com" http://localhost:8090/test/test.jsp
+# gives 200
+# in fact the headers are:
+# X-Forwarded-For: localhost
+# X-Forwarded-Host: example.com
+#
+# Don't forget ProxyPreserveHost On (otherwise UseAlias On fails...)
+#
+# Basically each Host header value is compared with the aliases of the VHost corresponding
+# to the request and UseAlias makes sure there's a match (if not, 404 is returned)
+
+CODE=$(curl -s -o /dev/null -m 20 -w "%{http_code}" --header "Host: example.com" http://localhost:8090/test/test.jsp)
+if [ ${CODE} != "200" ]; then
+ echo "Failed can't reach webapp at example.com: ${CODE}"
+ exit 1
+fi
+
+CODE=$(curl -s -o /dev/null -m 20 -w "%{http_code}" --header "Host: localhost" http://localhost:8090/test/test.jsp)
+if [ ${CODE} != "404" ]; then
+ echo "Failed should NOT reach webapp at localhost: ${CODE}"
+ exit 1
+fi
+
+CODE=$(curl -s -o /dev/null -m 20 -w "%{http_code}" --header "Host: tomcat1" http://localhost:8090/test/test.jsp)
+if [ ${CODE} != "404" ]; then
+    echo "Failed should NOT reach webapp at tomcat1: ${CODE}"
+ exit 1
+fi
+
+CODE=$(curl -s -o /dev/null -m 20 -w "%{http_code}" --header "Host: localhost" http://localhost:8090/webapp/test.jsp)
+if [ ${CODE} != "200" ]; then
+ echo "Failed can't reach webapp at localhost: ${CODE}"
+ exit 1
+fi
+
+CODE=$(curl -s -o /dev/null -m 20 -w "%{http_code}" --header "Host: tomcat1" http://localhost:8090/webapp/test.jsp)
+if [ ${CODE} != "200" ]; then
+    echo "Failed can't reach webapp at tomcat1: ${CODE}"
+ exit 1
+fi
+
+CODE=$(curl -s -o /dev/null -m 20 -w "%{http_code}" --header "Host: example.com" http://localhost:8090/webapp/test.jsp)
+if [ ${CODE} != "404" ]; then
+    echo "Failed should NOT reach webapp at example.com: ${CODE}"
+ exit 1
+fi
+
+tomcat_all_remove
+docker image rm ${IMG}-temporary
diff --git a/test/websocket/basic.sh b/test/websocket/basic.sh
index f312d5415..09fa2822d 100644
--- a/test/websocket/basic.sh
+++ b/test/websocket/basic.sh
@@ -7,10 +7,7 @@ httpd_remove
tomcat_all_remove
# run a fresh httpd
-httpd_start
-
-docker cp websocket/mod_proxy_cluster.conf $MPC_NAME:/usr/local/apache2/conf/mod_proxy_cluster.conf
-docker exec $MPC_NAME /usr/local/apache2/bin/apachectl restart
+MPC_CONF=websocket/mod_proxy_cluster.conf httpd_start
tomcat_start_two || exit 1
tomcat_wait_for_n_nodes 2 || exit 1
@@ -19,8 +16,8 @@ tomcat_wait_for_n_nodes 2 || exit 1
# Now try to test the websocket
echo "Testing websocket"
# The websocket-hello app is at: https://github.com/jfclere/httpd_websocket
-# we must check whether webapps-javaee exists, if yes, we mut use it becuase the app is javax
-docker exec tomcat2 sh -c 'ls webapps-javaee'
+# we must check whether webapps-javaee exists, if yes, we must use it because the app is javax
+docker exec tomcat2 sh -c 'ls webapps-javaee > /dev/null 2>&1'
if [ $? = 0 ]; then
docker cp websocket/websocket-hello-0.0.1.war tomcat1:/usr/local/tomcat/webapps-javaee
docker cp websocket/websocket-hello-0.0.1.war tomcat2:/usr/local/tomcat/webapps-javaee
diff --git a/test/websocket/mod_proxy_cluster.conf b/test/websocket/mod_proxy_cluster.conf
index af7902d42..34719588d 100644
--- a/test/websocket/mod_proxy_cluster.conf
+++ b/test/websocket/mod_proxy_cluster.conf
@@ -6,43 +6,39 @@ LoadModule slotmem_shm_module modules/mod_slotmem_shm.so
LoadModule manager_module modules/mod_manager.so
LoadModule proxy_cluster_module modules/mod_proxy_cluster.so
-
- UseAlias On
-
-
ProxyPreserveHost On
Listen 8090
-ServerName localhost
+ServerName httpd-mod_proxy_cluster
ManagerBalancerName mycluster
+WSUpgradeHeader websocket
+EnableWsTunnel
- CreateBalancers 0
- EnableOptions On
+ CreateBalancers 0
+ EnableOptions On
-EnableWsTunnel
-WSUpgradeHeader websocket
- EnableMCMPReceive
-
- Require ip 127.0.0.
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
-
- SetHandler mod_cluster-manager
- Require ip 127.0.0.
- Require ip ::1
- # This one is used in GH Actions
- Require ip 172.17.
-
+ EnableMCMPReceive
+
+ # For podman, this gets changed to IP in httpd/run.sh
+ Require host .mod_proxy_cluster_testsuite_net
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
+
+ SetHandler mod_cluster-manager
+ # _gateway is the hostname used through the docker port forward into the custom network
+ Require host _gateway
+ Require local
+
-
- ProxySet growth=10
- ProxySet lbmethod=cluster
-
+
+ ProxySet growth=10
+ ProxySet lbmethod=cluster
+