diff --git a/e2e.mk b/e2e.mk index fe0d12e3210..adf56a351fc 100644 --- a/e2e.mk +++ b/e2e.mk @@ -54,6 +54,6 @@ test-e2e-custom-image: ## Run e2e tests with a custom image format (use MANAGER_ echo "MANAGER_IMAGE must be set"; \ exit 1; \ fi - $(MAKE) set-manifest-image MANIFEST_IMG=$(shell echo $(MANAGER_IMAGE) | cut -d: -f1) MANIFEST_TAG=$(shell echo $(MANAGER_IMAGE) | cut -d: -f2) TARGET_RESOURCE="./config/capz/manager_image_patch.yaml" + $(MAKE) set-manifest-image MANIFEST_IMG=$(shell echo $${MANAGER_IMAGE%:*}) MANIFEST_TAG=$(shell echo $${MANAGER_IMAGE##*:}) TARGET_RESOURCE="./config/capz/manager_image_patch.yaml" $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./config/capz/manager_pull_policy.yaml" PULL_POLICY=IfNotPresent $(MAKE) test-e2e-run; diff --git a/scripts/ci-conformance.sh b/scripts/ci-conformance.sh index cf924802d38..a65a95d18e7 100755 --- a/scripts/ci-conformance.sh +++ b/scripts/ci-conformance.sh @@ -38,6 +38,8 @@ source "${REPO_ROOT}/hack/ensure-go.sh" source "${REPO_ROOT}/hack/ensure-tags.sh" # shellcheck source=hack/util.sh source "${REPO_ROOT}/hack/util.sh" +# shellcheck source=./scripts/ci-e2e-lib.sh +source "${REPO_ROOT}/scripts/ci-e2e-lib.sh" # Verify the required Environment Variables are present. capz::util::ensure_azure_envs @@ -95,6 +97,11 @@ capz::ci-conformance::cleanup() { trap capz::ci-conformance::cleanup EXIT +# pre-pull all the images that will be used in the e2e, thus making the actual test run +# less sensitive to the network speed. This includes: +# - cert-manager images +kind:prepullAdditionalImages + if [[ "${WINDOWS}" == "true" ]]; then make test-windows-upstream else diff --git a/scripts/ci-e2e-lib.sh b/scripts/ci-e2e-lib.sh new file mode 100644 index 00000000000..38e9e7f936c --- /dev/null +++ b/scripts/ci-e2e-lib.sh @@ -0,0 +1,44 @@ +#!/bin/bash

# Copyright 2025 The Kubernetes Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# kind:prepullAdditionalImages pre-pulls all the additional (not Kindest/node) images that will be used in the e2e, thus making +# the actual test run less sensitive to the network speed. +kind:prepullAdditionalImages () { + # Pulling cert manager images so we can pre-load in kind nodes + kind::prepullImage "quay.io/jetstack/cert-manager-cainjector:v1.19.1" + kind::prepullImage "quay.io/jetstack/cert-manager-webhook:v1.19.1" + kind::prepullImage "quay.io/jetstack/cert-manager-controller:v1.19.1" + + # Pull all images defined in DOCKER_PRELOAD_IMAGES. + for IMAGE in $(grep DOCKER_PRELOAD_IMAGES: < "$E2E_CONF_FILE" | sed -E 's/.*\[(.*)\].*/\1/' | tr ',' ' '); do + kind::prepullImage "${IMAGE}" + done +} + +# kind::prepullImage pre-pulls a docker image if not already present locally. +# The result will be available in the retVal value which is accessible from the caller. +kind::prepullImage () { + local image=$1 + image="${image//+/_}" + + retVal=0 + if [[ "$(docker images -q "$image" 2> /dev/null)" == "" ]]; then + echo "+ Pulling $image" + docker pull "$image" || retVal=$? 
+ else + echo "+ image $image already present in the system, skipping pre-pull" + fi +} diff --git a/scripts/ci-e2e.sh b/scripts/ci-e2e.sh index ccd35cc1763..32a8ff0c481 100755 --- a/scripts/ci-e2e.sh +++ b/scripts/ci-e2e.sh @@ -35,6 +35,8 @@ source "${REPO_ROOT}/hack/ensure-go.sh" source "${REPO_ROOT}/hack/ensure-tags.sh" # shellcheck source=hack/util.sh source "${REPO_ROOT}/hack/util.sh" +# shellcheck source=./scripts/ci-e2e-lib.sh +source "${REPO_ROOT}/scripts/ci-e2e-lib.sh" # Verify the required Environment Variables are present. capz::util::ensure_azure_envs @@ -81,6 +83,11 @@ capz::ci-e2e::cleanup() { make test-e2e-run-cleanup || true } +# pre-pull all the images that will be used in the e2e, thus making the actual test run +# less sensitive to the network speed. This includes: +# - cert-manager images +kind:prepullAdditionalImages + trap capz::ci-e2e::cleanup EXIT # Image is configured as `${CONTROLLER_IMG}-${ARCH}:${TAG}` where `CONTROLLER_IMG` is defaulted to `${REGISTRY}/${IMAGE_NAME}`. if [[ "${BUILD_MANAGER_IMAGE}" == "false" ]]; then diff --git a/test/e2e/azure_test.go b/test/e2e/azure_test.go index 97eff3cfebf..7db7414bdf4 100644 --- a/test/e2e/azure_test.go +++ b/test/e2e/azure_test.go @@ -30,6 +30,7 @@ import ( "github.com/Azure/azure-service-operator/v2/pkg/common/config" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -100,6 +101,28 @@ var _ = Describe("Workload cluster creation", func() { Expect(os.Setenv(ClusterIdentityName, identityName)).To(Succeed()) Expect(os.Setenv(ClusterIdentityNamespace, defaultNamespace)).To(Succeed()) additionalCleanup = nil + + validating := &admissionregistrationv1.ValidatingWebhookConfigurationList{} + Expect(bootstrapClusterProxy.GetClient().List(ctx, validating)).To(Succeed()) + mutating := &admissionregistrationv1.MutatingWebhookConfigurationList{} + Expect(bootstrapClusterProxy.GetClient().List(ctx, mutating)).To(Succeed()) + + Logf("Looking for webhooks to have a caBundle") + for _, w := range validating.Items { + for _, webhook := range w.Webhooks { + if len(webhook.ClientConfig.CABundle) == 0 { + Logf("validatingwebhookconfiguration %s webhook %s has no caBundle", w.Name, webhook.Name) + } + } + } + for _, w := range mutating.Items { + for _, webhook := range w.Webhooks { + if len(webhook.ClientConfig.CABundle) == 0 { + Logf("mutatingwebhookconfiguration %s webhook %s has no caBundle", w.Name, webhook.Name) + } + } + } + Logf("Done looking for webhooks to have a caBundle") }) AfterEach(func() { @@ -174,6 +197,7 @@ var _ = Describe("Workload cluster creation", func() { }) }), ), result) + return By("Creating a private cluster from the management cluster", func() { AzurePrivateClusterSpec(ctx, func() AzurePrivateClusterSpecInput { @@ -205,7 +229,7 @@ var _ = Describe("Workload cluster creation", func() { specName, withNamespace(namespace.Name), withClusterName(clusterName), - withControlPlaneMachineCount(3), + withControlPlaneMachineCount(1), withWorkerMachineCount(2), withControlPlaneInterval(specName, "wait-control-plane-ha"), withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{ @@ -221,6 +245,7 @@ var _ = Describe("Workload cluster creation", 
func() { }) }), ), result) + return By("Verifying expected VM extensions are present on the node", func() { AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput { @@ -291,7 +316,7 @@ var _ = Describe("Workload cluster creation", func() { withFlavor("azure-cni-v1"), withNamespace(namespace.Name), withClusterName(clusterName), - withControlPlaneMachineCount(3), + withControlPlaneMachineCount(1), withWorkerMachineCount(2), withControlPlaneInterval(specName, "wait-control-plane-ha"), withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{ @@ -307,6 +332,7 @@ var _ = Describe("Workload cluster creation", func() { }) }), ), result) + return By("can expect VM extensions are present on the node", func() { AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput { @@ -366,6 +392,7 @@ var _ = Describe("Workload cluster creation", func() { }) }), ), result) + return By("can create and access a load balancer", func() { AzureLBSpec(ctx, func() AzureLBSpecInput { @@ -404,6 +431,7 @@ var _ = Describe("Workload cluster creation", func() { }) }), ), result) + return By("can create and access a load balancer", func() { AzureLBSpec(ctx, func() AzureLBSpecInput { @@ -441,6 +469,7 @@ var _ = Describe("Workload cluster creation", func() { }) }), ), result) + return By("can create and access a load balancer", func() { AzureLBSpec(ctx, func() AzureLBSpecInput { @@ -463,7 +492,7 @@ var _ = Describe("Workload cluster creation", func() { withFlavor("ipv6"), withNamespace(namespace.Name), withClusterName(clusterName), - withControlPlaneMachineCount(3), + withControlPlaneMachineCount(1), withWorkerMachineCount(1), withControlPlaneInterval(specName, "wait-control-plane-ha"), withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{ @@ -479,6 +508,7 @@ var _ = Describe("Workload cluster creation", func() { }) }), ), result) + return By("Verifying expected VM extensions are present on the node", func() { AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput { @@ -536,6 +566,7 @@ 
var _ = Describe("Workload cluster creation", func() { }) }), ), result) + return By("Verifying expected VM extensions are present on the node", func() { AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput { @@ -592,6 +623,7 @@ var _ = Describe("Workload cluster creation", func() { }) }), ), result) + return By("Verifying expected VM extensions are present on the node", func() { AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput { @@ -668,6 +700,7 @@ var _ = Describe("Workload cluster creation", func() { }) }), ), result) + return By("Verifying expected VM extensions are present on the node", func() { AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput { @@ -712,6 +745,7 @@ var _ = Describe("Workload cluster creation", func() { withMachinePoolInterval(specName, "wait-machine-pool-nodes"), withControlPlaneInterval(specName, "wait-control-plane"), ), result) + return By("Verifying machinepool can scale out and in", func() { AzureMachinePoolsSpec(ctx, func() AzureMachinePoolsSpecInput { @@ -779,6 +813,7 @@ var _ = Describe("Workload cluster creation", func() { ) clusterctl.ApplyClusterTemplateAndWait(ctx, clusterTemplate, result) + return // This test should be first to make sure that the template re-applied here matches the current // state of the cluster exactly. 
@@ -851,6 +886,7 @@ var _ = Describe("Workload cluster creation", func() { WaitForControlPlaneMachinesReady: WaitForAKSControlPlaneReady, }), ), result) + return By("Exercising machine pools", func() { AKSMachinePoolSpec(ctx, func() AKSMachinePoolSpecInput { @@ -979,6 +1015,7 @@ var _ = Describe("Workload cluster creation", func() { WaitForControlPlaneMachinesReady: WaitForAKSControlPlaneReady, }), ), result) + return By("Performing ClusterClass operations on the cluster", func() { AKSClusterClassSpec(ctx, func() AKSClusterClassInput { @@ -1013,6 +1050,7 @@ var _ = Describe("Workload cluster creation", func() { WaitForControlPlaneMachinesReady: WaitForAKSControlPlaneReady, }), ), result) + return By("Exercising machine pools", func() { AKSMachinePoolSpec(ctx, func() AKSMachinePoolSpecInput { @@ -1040,7 +1078,7 @@ var _ = Describe("Workload cluster creation", func() { withFlavor("dual-stack"), withNamespace(namespace.Name), withClusterName(clusterName), - withControlPlaneMachineCount(3), + withControlPlaneMachineCount(1), withWorkerMachineCount(1), withControlPlaneInterval(specName, "wait-control-plane-ha"), withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{ @@ -1056,6 +1094,7 @@ var _ = Describe("Workload cluster creation", func() { }) }), ), result) + return By("Verifying expected VM extensions are present on the node", func() { AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput { @@ -1127,6 +1166,7 @@ var _ = Describe("Workload cluster creation", func() { }) }), ), result) + return By("Verifying expected VM extensions are present on the node", func() { AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput { @@ -1170,7 +1210,7 @@ var _ = Describe("Workload cluster creation", func() { withFlavor("topology-rke2"), withNamespace(namespace.Name), withClusterName(clusterName), - withControlPlaneMachineCount(3), + withControlPlaneMachineCount(1), withWorkerMachineCount(1), withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{ 
WaitForControlPlaneInitialized: func(ctx context.Context, input clusterctl.ApplyCustomClusterTemplateAndWaitInput, result *clusterctl.ApplyCustomClusterTemplateAndWaitResult) { @@ -1180,6 +1220,7 @@ var _ = Describe("Workload cluster creation", func() { }, }), ), result) + return By("Verifying expected VM extensions are present on the node", func() { AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput { @@ -1225,6 +1266,7 @@ var _ = Describe("Workload cluster creation", func() { }) }), ), result) + return By("Verifying extendedLocation property in Azure VMs is corresponding to extendedLocation property in edgezone yaml file", func() { AzureEdgeZoneClusterSpec(ctx, func() AzureEdgeZoneClusterSpecInput { @@ -1252,7 +1294,7 @@ var _ = Describe("Workload cluster creation", func() { specName, withNamespace(namespace.Name), withClusterName(clusterName), - withControlPlaneMachineCount(3), + withControlPlaneMachineCount(1), withWorkerMachineCount(2), withControlPlaneInterval(specName, "wait-control-plane-ha"), withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{ @@ -1268,6 +1310,7 @@ var _ = Describe("Workload cluster creation", func() { }) }), ), result) + return By("Probing workload cluster with APIServerILB feature gate", func() { AzureAPIServerILBSpec(ctx, func() AzureAPIServerILBSpecInput { @@ -1303,7 +1346,7 @@ var _ = Describe("Workload cluster creation", func() { withFlavor("apiserver-ilb"), withNamespace(namespace.Name), withClusterName(clusterName), - withControlPlaneMachineCount(3), + withControlPlaneMachineCount(1), withWorkerMachineCount(2), withControlPlaneInterval(specName, "wait-control-plane-ha"), withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{ @@ -1319,6 +1362,7 @@ var _ = Describe("Workload cluster creation", func() { }) }), ), result) + return By("Probing workload cluster with APIServerILB feature gate", func() { AzureAPIServerILBSpec(ctx, func() AzureAPIServerILBSpecInput { @@ -1351,7 +1395,7 @@ var _ = Describe("Workload cluster 
creation", func() { withNamespace(namespace.Name), withClusterName(clusterName), withFlavor("azl3"), - withControlPlaneMachineCount(3), + withControlPlaneMachineCount(1), withWorkerMachineCount(2), withControlPlaneInterval(specName, "wait-control-plane-ha"), withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{ @@ -1367,6 +1411,7 @@ var _ = Describe("Workload cluster creation", func() { }) }), ), result) + return By("Verifying expected VM extensions are present on the node", func() { AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput { diff --git a/test/e2e/config/azure-dev.yaml b/test/e2e/config/azure-dev.yaml index b89ed3fb755..089bde916bc 100644 --- a/test/e2e/config/azure-dev.yaml +++ b/test/e2e/config/azure-dev.yaml @@ -11,6 +11,12 @@ images: loadBehavior: tryLoad - name: registry.k8s.io/cluster-api-helm/cluster-api-helm-controller:v0.4.1 loadBehavior: tryLoad + - name: quay.io/jetstack/cert-manager-cainjector:v1.19.1 + loadBehavior: tryLoad + - name: quay.io/jetstack/cert-manager-webhook:v1.19.1 + loadBehavior: tryLoad + - name: quay.io/jetstack/cert-manager-controller:v1.19.1 + loadBehavior: tryLoad providers: - name: cluster-api diff --git a/test/e2e/conformance_test.go b/test/e2e/conformance_test.go index f6d3442fc4c..fc00e7113c9 100644 --- a/test/e2e/conformance_test.go +++ b/test/e2e/conformance_test.go @@ -30,6 +30,7 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" "github.com/onsi/gomega/gmeasure" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -144,6 +145,29 @@ var _ = Describe("Conformance Tests", func() { Expect(err).NotTo(HaveOccurred()) stopwatch := experiment.NewStopwatch() + + validating := &admissionregistrationv1.ValidatingWebhookConfigurationList{} + Expect(bootstrapClusterProxy.GetClient().List(ctx, validating)).To(Succeed()) + mutating := &admissionregistrationv1.MutatingWebhookConfigurationList{} + Expect(bootstrapClusterProxy.GetClient().List(ctx, mutating)).To(Succeed()) + + Logf("Looking for webhooks to have a caBundle") + for _, w := range validating.Items { + for _, webhook := range w.Webhooks { + if len(webhook.ClientConfig.CABundle) == 0 { + Logf("validatingwebhookconfiguration %s webhook %s has no caBundle", w.Name, webhook.Name) + } + } + } + for _, w := range mutating.Items { + for _, webhook := range w.Webhooks { + if len(webhook.ClientConfig.CABundle) == 0 { + Logf("mutatingwebhookconfiguration %s webhook %s has no caBundle", w.Name, webhook.Name) + } + } + } + Logf("Done looking for webhooks to have a caBundle") + clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput( specName, withFlavor(flavor), @@ -157,6 +181,7 @@ var _ = Describe("Conformance Tests", func() { }), ), result) stopwatch.Record("cluster creation") + return workloadProxy := bootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, clusterName) diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 195904779b9..6d76d49c208 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -32,6 +32,8 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" @@ -39,6 +41,7 @@ import ( "sigs.k8s.io/cluster-api/test/framework/bootstrap" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" ) func init() { @@ -125,6 +128,49 @@ var _ = SynchronizedAfterSuite(func() { // After each ParallelNode. }, func() { // After all ParallelNodes. + framework.DumpAllResources(context.Background(), framework.DumpAllResourcesInput{ + Lister: bootstrapClusterProxy.GetClient(), + KubeConfigPath: bootstrapClusterProxy.GetKubeconfigPath(), + ClusterctlConfigPath: clusterctlConfigPath, + Namespace: "thesearentnamespaced", + LogPath: filepath.Join(artifactFolder, "clusters", "bootstrap", "resources"), + IncludeTypes: []ctrl.TypeMeta{ + { + APIVersion: "admissionregistration.k8s.io/v1", + Kind: "ValidatingWebhookConfiguration", + }, + { + APIVersion: "admissionregistration.k8s.io/v1", + Kind: "MutatingWebhookConfiguration", + }, + }, + }) + framework.DumpAllResources(context.Background(), framework.DumpAllResourcesInput{ + Lister: bootstrapClusterProxy.GetClient(), + KubeConfigPath: bootstrapClusterProxy.GetKubeconfigPath(), + ClusterctlConfigPath: clusterctlConfigPath, + Namespace: "capz-system", + LogPath: filepath.Join(artifactFolder, "clusters", "bootstrap", "resources"), + IncludeTypes: []ctrl.TypeMeta{ + { + APIVersion: "v1", + Kind: "Pod", + }, + }, + }) + framework.DumpAllResources(context.Background(), framework.DumpAllResourcesInput{ + Lister: bootstrapClusterProxy.GetClient(), + KubeConfigPath: bootstrapClusterProxy.GetKubeconfigPath(), + ClusterctlConfigPath: clusterctlConfigPath, + Namespace: "capi-system", + LogPath: filepath.Join(artifactFolder, "clusters", "bootstrap", "resources"), + IncludeTypes: []ctrl.TypeMeta{ + { + APIVersion: "v1", + Kind: "Pod", 
+ }, + }, + }) By("Tearing down the management cluster") if !skipCleanup { @@ -199,6 +245,52 @@ func initBootstrapCluster(bootstrapClusterProxy framework.ClusterProxy, config * AddonProviders: config.AddonProviders(), LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), }, config.GetIntervals(bootstrapClusterProxy.GetName(), "wait-controllers")...) + + certManager := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "cert-manager", + Name: "cert-manager", + }, + } + Expect(bootstrapClusterProxy.GetClient().Get(ctx, client.ObjectKeyFromObject(certManager), certManager)).To(Succeed()) + framework.WatchDeploymentLogsByName(ctx, framework.WatchDeploymentLogsByNameInput{ + GetLister: bootstrapClusterProxy.GetClient(), + Cache: bootstrapClusterProxy.GetCache(ctx), + ClientSet: bootstrapClusterProxy.GetClientSet(), + Deployment: certManager, + LogPath: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName(), "logs", certManager.GetNamespace()), + }) + + certManagerCAInjector := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "cert-manager", + Name: "cert-manager-cainjector", + }, + } + Expect(bootstrapClusterProxy.GetClient().Get(ctx, client.ObjectKeyFromObject(certManagerCAInjector), certManagerCAInjector)).To(Succeed()) + framework.WatchDeploymentLogsByName(ctx, framework.WatchDeploymentLogsByNameInput{ + GetLister: bootstrapClusterProxy.GetClient(), + Cache: bootstrapClusterProxy.GetCache(ctx), + ClientSet: bootstrapClusterProxy.GetClientSet(), + Deployment: certManagerCAInjector, + LogPath: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName(), "logs", certManagerCAInjector.GetNamespace()), + }) + + certManagerWebhook := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "cert-manager", + Name: "cert-manager-webhook", + }, + } + Expect(bootstrapClusterProxy.GetClient().Get(ctx, client.ObjectKeyFromObject(certManagerWebhook), 
certManagerWebhook)).To(Succeed()) + framework.WatchDeploymentLogsByName(ctx, framework.WatchDeploymentLogsByNameInput{ + GetLister: bootstrapClusterProxy.GetClient(), + Cache: bootstrapClusterProxy.GetCache(ctx), + ClientSet: bootstrapClusterProxy.GetClientSet(), + Deployment: certManagerWebhook, + LogPath: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName(), "logs", certManagerWebhook.GetNamespace()), + }) + } func tearDown(bootstrapClusterProvider bootstrap.ClusterProvider, bootstrapClusterProxy framework.ClusterProxy) {