From 7a76d576c5b4c2237387a4732aa3ac827bca33eb Mon Sep 17 00:00:00 2001 From: Christian Schlotter Date: Mon, 10 Nov 2025 14:17:01 +0100 Subject: [PATCH 1/5] lint: add import aliases for CAPI --- .golangci.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.golangci.yml b/.golangci.yml index 29f85ed5e..45be0534c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -77,6 +77,14 @@ linters: alias: kerrors - pkg: sigs.k8s.io/controller-runtime alias: ctrl + - pkg: "sigs.k8s.io/cluster-api/api/core/v1beta2" + alias: clusterv1 + - pkg: "sigs.k8s.io/cluster-api/api/core/v1beta1" + alias: clusterv1beta1 + - pkg: "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" + alias: v1beta1patch + - pkg: "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + alias: v1beta1conditions no-unaliased: true exclusions: generated: lax From 675bba6272e415ac5b7a43558229356904dc50d8 Mon Sep 17 00:00:00 2001 From: Christian Schlotter Date: Mon, 10 Nov 2025 14:17:20 +0100 Subject: [PATCH 2/5] GENERATED: make lint-fix --- api/v1beta1/gcpcluster_types.go | 8 +- api/v1beta1/gcpclustertemplate_types.go | 4 +- api/v1beta1/types.go | 4 +- cloud/interfaces.go | 8 +- cloud/scope/cluster.go | 14 ++-- cloud/scope/machine.go | 10 +-- cloud/scope/machine_test.go | 10 +-- cloud/scope/managedcluster.go | 14 ++-- cloud/scope/managedcontrolplane.go | 28 +++---- cloud/scope/managedmachinepool.go | 26 +++---- cloud/scope/managedmachinepool_test.go | 8 +- .../compute/firewalls/reconcile_test.go | 8 +- .../compute/instances/reconcile_test.go | 28 +++---- .../compute/loadbalancers/reconcile_test.go | 14 ++-- .../compute/networks/reconcile_test.go | 8 +- .../compute/subnets/reconcile_test.go | 8 +- .../services/container/clusters/reconcile.go | 76 +++++++++---------- .../services/container/nodepools/reconcile.go | 74 +++++++++--------- cloud/services/shared/machinepool.go | 6 +- controllers/gcpcluster_controller.go | 10 +-- controllers/gcpmachine_controller.go | 10 +-- .../gcpmachine_controller_unit_test.go | 18 ++--- controllers/suite_test.go | 4 +- exp/api/v1beta1/conditions_consts.go | 18 ++--- exp/api/v1beta1/gcpmanagedcluster_types.go | 12 +-- .../v1beta1/gcpmanagedcontrolplane_types.go | 10 +-- .../v1beta1/gcpmanagedmachinepool_types.go | 8 +- exp/api/v1beta1/types_template.go | 4 +- .../gke/api/v1beta1/gkeconfig_types.go | 4 +- .../gke/controllers/gkeconfig_controller.go | 10 +-- .../gcpmanagedcluster_controller.go | 10 +-- .../gcpmanagedcontrolplane_controller.go | 8 +- .../gcpmanagedmachinepool_controller.go | 28 +++---- main.go | 6 +- pkg/capiutils/predicates.go | 10 +-- pkg/capiutils/utils.go | 30 ++++---- 36 files changed, 278 insertions(+), 278 deletions(-) diff --git a/api/v1beta1/gcpcluster_types.go b/api/v1beta1/gcpcluster_types.go index f9728fc91..88d930530 100644 --- a/api/v1beta1/gcpcluster_types.go +++ b/api/v1beta1/gcpcluster_types.go @@ -18,7 +18,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -37,7 +37,7 @@ type GCPClusterSpec struct { // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. // +optional - ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` + ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` // NetworkSpec encapsulates all things related to GCP network. 
// +optional @@ -77,8 +77,8 @@ type GCPClusterSpec struct { // GCPClusterStatus defines the observed state of GCPCluster. type GCPClusterStatus struct { - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` - Network Network `json:"network,omitempty"` + FailureDomains clusterv1beta1.FailureDomains `json:"failureDomains,omitempty"` + Network Network `json:"network,omitempty"` // Bastion Instance `json:"bastion,omitempty"` Ready bool `json:"ready"` diff --git a/api/v1beta1/gcpclustertemplate_types.go b/api/v1beta1/gcpclustertemplate_types.go index c1c325a12..4f8a3dc2b 100644 --- a/api/v1beta1/gcpclustertemplate_types.go +++ b/api/v1beta1/gcpclustertemplate_types.go @@ -18,7 +18,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // GCPClusterTemplateSpec defines the desired state of GCPClusterTemplate. @@ -31,7 +31,7 @@ type GCPClusterTemplateResource struct { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"` + ObjectMeta clusterv1beta1.ObjectMeta `json:"metadata,omitempty"` Spec GCPClusterSpec `json:"spec"` } diff --git a/api/v1beta1/types.go b/api/v1beta1/types.go index 27019aff9..4d94c8f2c 100644 --- a/api/v1beta1/types.go +++ b/api/v1beta1/types.go @@ -19,7 +19,7 @@ package v1beta1 import ( "fmt" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // GCPMachineTemplateResource describes the data needed to create am GCPMachine from a template. @@ -27,7 +27,7 @@ type GCPMachineTemplateResource struct { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"` + ObjectMeta clusterv1beta1.ObjectMeta `json:"metadata,omitempty"` // Spec is the specification of the desired behavior of the machine. Spec GCPMachineSpec `json:"spec"` diff --git a/cloud/interfaces.go b/cloud/interfaces.go index c7cf79021..14ac7e2db 100644 --- a/cloud/interfaces.go +++ b/cloud/interfaces.go @@ -24,7 +24,7 @@ import ( "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" corev1 "k8s.io/api/core/v1" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // Cloud alias for cloud.Cloud interface. @@ -61,15 +61,15 @@ type ClusterGetter interface { SkipFirewallRuleCreation() bool Network() *infrav1.Network AdditionalLabels() infrav1.Labels - FailureDomains() clusterv1.FailureDomains - ControlPlaneEndpoint() clusterv1.APIEndpoint + FailureDomains() clusterv1beta1.FailureDomains + ControlPlaneEndpoint() clusterv1beta1.APIEndpoint ResourceManagerTags() infrav1.ResourceManagerTags LoadBalancer() infrav1.LoadBalancerSpec } // ClusterSetter is an interface which can set cluster information. type ClusterSetter interface { - SetControlPlaneEndpoint(endpoint clusterv1.APIEndpoint) + SetControlPlaneEndpoint(endpoint clusterv1beta1.APIEndpoint) } // Cluster is an interface which can get and set cluster information. 
diff --git a/cloud/scope/cluster.go b/cloud/scope/cluster.go index 6fc3ef4c8..e52a8fc97 100644 --- a/cloud/scope/cluster.go +++ b/cloud/scope/cluster.go @@ -27,7 +27,7 @@ import ( "k8s.io/utils/ptr" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/cloud" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -36,7 +36,7 @@ import ( type ClusterScopeParams struct { GCPServices Client client.Client - Cluster *clusterv1.Cluster + Cluster *clusterv1beta1.Cluster GCPCluster *infrav1.GCPCluster } @@ -78,7 +78,7 @@ type ClusterScope struct { client client.Client patchHelper *patch.Helper - Cluster *clusterv1.Cluster + Cluster *clusterv1beta1.Cluster GCPCluster *infrav1.GCPCluster GCPServices } @@ -184,7 +184,7 @@ func (s *ClusterScope) ResourceManagerTags() infrav1.ResourceManagerTags { } // ControlPlaneEndpoint returns the cluster control-plane endpoint. -func (s *ClusterScope) ControlPlaneEndpoint() clusterv1.APIEndpoint { +func (s *ClusterScope) ControlPlaneEndpoint() clusterv1beta1.APIEndpoint { endpoint := s.GCPCluster.Spec.ControlPlaneEndpoint endpoint.Port = 443 if c := s.Cluster.Spec.ClusterNetwork; c != nil { @@ -194,7 +194,7 @@ func (s *ClusterScope) ControlPlaneEndpoint() clusterv1.APIEndpoint { } // FailureDomains returns the cluster failure domains. -func (s *ClusterScope) FailureDomains() clusterv1.FailureDomains { +func (s *ClusterScope) FailureDomains() clusterv1beta1.FailureDomains { return s.GCPCluster.Status.FailureDomains } @@ -208,12 +208,12 @@ func (s *ClusterScope) SetReady() { } // SetFailureDomains sets cluster failure domains. -func (s *ClusterScope) SetFailureDomains(fd clusterv1.FailureDomains) { +func (s *ClusterScope) SetFailureDomains(fd clusterv1beta1.FailureDomains) { s.GCPCluster.Status.FailureDomains = fd } // SetControlPlaneEndpoint sets cluster control-plane endpoint. -func (s *ClusterScope) SetControlPlaneEndpoint(endpoint clusterv1.APIEndpoint) { +func (s *ClusterScope) SetControlPlaneEndpoint(endpoint clusterv1beta1.APIEndpoint) { s.GCPCluster.Spec.ControlPlaneEndpoint = endpoint } diff --git a/cloud/scope/machine.go b/cloud/scope/machine.go index c4bc0bfe6..c259b0698 100644 --- a/cloud/scope/machine.go +++ b/cloud/scope/machine.go @@ -36,7 +36,7 @@ import ( "sigs.k8s.io/cluster-api-provider-gcp/cloud" "sigs.k8s.io/cluster-api-provider-gcp/cloud/providerid" "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/shared" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -45,7 +45,7 @@ import ( type MachineScopeParams struct { Client client.Client ClusterGetter cloud.ClusterGetter - Machine *clusterv1.Machine + Machine *clusterv1beta1.Machine GCPMachine *infrav1.GCPMachine } @@ -81,7 +81,7 @@ type MachineScope struct { client client.Client patchHelper *patch.Helper ClusterGetter cloud.ClusterGetter - Machine *clusterv1.Machine + Machine *clusterv1beta1.Machine GCPMachine *infrav1.GCPMachine } @@ -150,8 +150,8 @@ func (m *MachineScope) Role() string { } // IsControlPlaneMachine checks machine is a control plane node. 
-func IsControlPlaneMachine(machine *clusterv1.Machine) bool { - _, ok := machine.Labels[clusterv1.MachineControlPlaneLabel] +func IsControlPlaneMachine(machine *clusterv1beta1.Machine) bool { + _, ok := machine.Labels[clusterv1beta1.MachineControlPlaneLabel] return ok } diff --git a/cloud/scope/machine_test.go b/cloud/scope/machine_test.go index 96d6c4e65..909690f57 100644 --- a/cloud/scope/machine_test.go +++ b/cloud/scope/machine_test.go @@ -6,7 +6,7 @@ import ( "github.com/stretchr/testify/assert" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) @@ -28,8 +28,8 @@ func TestMachineLocalSSDDiskType(t *testing.T) { // New test machine, needed as parameter. failureDomain := "example.com" - testMachine := clusterv1.Machine{ - Spec: clusterv1.MachineSpec{ + testMachine := clusterv1beta1.Machine{ + Spec: clusterv1beta1.MachineSpec{ FailureDomain: &failureDomain, }, } @@ -87,8 +87,8 @@ func TestInstanceNetworkInterfaceAliasIPRangesSpec(t *testing.T) { // Test machine parameter failureDomain := "us-central1-a" - testMachine := clusterv1.Machine{ - Spec: clusterv1.MachineSpec{ + testMachine := clusterv1beta1.Machine{ + Spec: clusterv1beta1.MachineSpec{ FailureDomain: &failureDomain, }, } diff --git a/cloud/scope/managedcluster.go b/cloud/scope/managedcluster.go index b3095374c..79b620651 100644 --- a/cloud/scope/managedcluster.go +++ b/cloud/scope/managedcluster.go @@ -27,7 +27,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/cloud" infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -36,7 +36,7 @@ import ( type ManagedClusterScopeParams struct { GCPServices Client client.Client - Cluster *clusterv1.Cluster + Cluster *clusterv1beta1.Cluster GCPManagedCluster *infrav1exp.GCPManagedCluster GCPManagedControlPlane *infrav1exp.GCPManagedControlPlane } @@ -80,7 +80,7 @@ type ManagedClusterScope struct { client client.Client patchHelper *patch.Helper - Cluster *clusterv1.Cluster + Cluster *clusterv1beta1.Cluster GCPManagedCluster *infrav1exp.GCPManagedCluster GCPManagedControlPlane *infrav1exp.GCPManagedControlPlane GCPServices @@ -172,14 +172,14 @@ func (s *ManagedClusterScope) ResourceManagerTags() infrav1.ResourceManagerTags } // ControlPlaneEndpoint returns the cluster control-plane endpoint. -func (s *ManagedClusterScope) ControlPlaneEndpoint() clusterv1.APIEndpoint { +func (s *ManagedClusterScope) ControlPlaneEndpoint() clusterv1beta1.APIEndpoint { endpoint := s.GCPManagedCluster.Spec.ControlPlaneEndpoint endpoint.Port = ptr.Deref(s.Cluster.Spec.ClusterNetwork.APIServerPort, 443) return endpoint } // FailureDomains returns the cluster failure domains. -func (s *ManagedClusterScope) FailureDomains() clusterv1.FailureDomains { +func (s *ManagedClusterScope) FailureDomains() clusterv1beta1.FailureDomains { return s.GCPManagedCluster.Status.FailureDomains } @@ -193,12 +193,12 @@ func (s *ManagedClusterScope) SetReady() { } // SetFailureDomains sets cluster failure domains. 
-func (s *ManagedClusterScope) SetFailureDomains(fd clusterv1.FailureDomains) { +func (s *ManagedClusterScope) SetFailureDomains(fd clusterv1beta1.FailureDomains) { s.GCPManagedCluster.Status.FailureDomains = fd } // SetControlPlaneEndpoint sets cluster control-plane endpoint. -func (s *ManagedClusterScope) SetControlPlaneEndpoint(endpoint clusterv1.APIEndpoint) { +func (s *ManagedClusterScope) SetControlPlaneEndpoint(endpoint clusterv1beta1.APIEndpoint) { s.GCPManagedCluster.Spec.ControlPlaneEndpoint = endpoint } diff --git a/cloud/scope/managedcontrolplane.go b/cloud/scope/managedcontrolplane.go index b168bab0c..b11dd2432 100644 --- a/cloud/scope/managedcontrolplane.go +++ b/cloud/scope/managedcontrolplane.go @@ -22,15 +22,15 @@ import ( "sigs.k8s.io/cluster-api-provider-gcp/util/location" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" container "cloud.google.com/go/container/apiv1" credentials "cloud.google.com/go/iam/credentials/apiv1" resourcemanager "cloud.google.com/go/resourcemanager/apiv3" "github.com/pkg/errors" infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -45,7 +45,7 @@ type ManagedControlPlaneScopeParams struct { ManagedClusterClient *container.ClusterManagerClient TagBindingsClient *resourcemanager.TagBindingsClient Client client.Client - Cluster *clusterv1.Cluster + Cluster *clusterv1beta1.Cluster GCPManagedCluster *infrav1exp.GCPManagedCluster GCPManagedControlPlane *infrav1exp.GCPManagedControlPlane } @@ -91,7 +91,7 @@ func NewManagedControlPlaneScope(ctx context.Context, params ManagedControlPlane params.CredentialsClient = credentialsClient } - helper, err := patch.NewHelper(params.GCPManagedControlPlane, params.Client) + helper, err := v1beta1patch.NewHelper(params.GCPManagedControlPlane, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } @@ -112,9 +112,9 @@ func NewManagedControlPlaneScope(ctx context.Context, params ManagedControlPlane // ManagedControlPlaneScope defines the basic context for an actuator to operate upon. 
type ManagedControlPlaneScope struct { client client.Client - patchHelper *patch.Helper + patchHelper *v1beta1patch.Helper - Cluster *clusterv1.Cluster + Cluster *clusterv1beta1.Cluster GCPManagedCluster *infrav1exp.GCPManagedCluster GCPManagedControlPlane *infrav1exp.GCPManagedControlPlane mcClient *container.ClusterManagerClient @@ -122,7 +122,7 @@ type ManagedControlPlaneScope struct { credentialsClient *credentials.IamCredentialsClient credential *Credential - AllMachinePools []clusterv1.MachinePool + AllMachinePools []clusterv1beta1.MachinePool AllManagedMachinePools []infrav1exp.GCPManagedMachinePool } @@ -131,7 +131,7 @@ func (s *ManagedControlPlaneScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.GCPManagedControlPlane, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneUpdatingCondition, @@ -148,7 +148,7 @@ func (s *ManagedControlPlaneScope) Close() error { } // ConditionSetter return a condition setter (which is GCPManagedControlPlane itself). -func (s *ManagedControlPlaneScope) ConditionSetter() conditions.Setter { +func (s *ManagedControlPlaneScope) ConditionSetter() v1beta1conditions.Setter { return s.GCPManagedControlPlane } @@ -178,14 +178,14 @@ func (s *ManagedControlPlaneScope) GetCredential() *Credential { } // GetAllNodePools gets all node pools for the control plane. -func (s *ManagedControlPlaneScope) GetAllNodePools(ctx context.Context) ([]infrav1exp.GCPManagedMachinePool, []clusterv1.MachinePool, error) { +func (s *ManagedControlPlaneScope) GetAllNodePools(ctx context.Context) ([]infrav1exp.GCPManagedMachinePool, []clusterv1beta1.MachinePool, error) { if len(s.AllManagedMachinePools) == 0 { listOptions := []client.ListOption{ client.InNamespace(s.GCPManagedControlPlane.Namespace), - client.MatchingLabels(map[string]string{clusterv1.ClusterNameLabel: s.Cluster.Name}), + client.MatchingLabels(map[string]string{clusterv1beta1.ClusterNameLabel: s.Cluster.Name}), } - machinePoolList := &clusterv1.MachinePoolList{} + machinePoolList := &clusterv1beta1.MachinePoolList{} if err := s.client.List(ctx, machinePoolList, listOptions...); err != nil { return nil, nil, err } @@ -226,7 +226,7 @@ func (s *ManagedControlPlaneScope) ClusterName() string { // SetEndpoint sets the Endpoint of GCPManagedControlPlane. 
func (s *ManagedControlPlaneScope) SetEndpoint(host string) { - s.GCPManagedControlPlane.Spec.Endpoint = clusterv1.APIEndpoint{ + s.GCPManagedControlPlane.Spec.Endpoint = clusterv1beta1.APIEndpoint{ Host: host, Port: APIServerPort, } diff --git a/cloud/scope/managedmachinepool.go b/cloud/scope/managedmachinepool.go index 2fdd46eba..14ac88d75 100644 --- a/cloud/scope/managedmachinepool.go +++ b/cloud/scope/managedmachinepool.go @@ -26,15 +26,15 @@ import ( "sigs.k8s.io/cluster-api-provider-gcp/cloud" "sigs.k8s.io/cluster-api-provider-gcp/util/location" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" compute "cloud.google.com/go/compute/apiv1" container "cloud.google.com/go/container/apiv1" "cloud.google.com/go/container/apiv1/containerpb" "github.com/pkg/errors" infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -43,8 +43,8 @@ type ManagedMachinePoolScopeParams struct { ManagedClusterClient *container.ClusterManagerClient InstanceGroupManagersClient *compute.InstanceGroupManagersClient Client client.Client - Cluster *clusterv1.Cluster - MachinePool *clusterv1.MachinePool + Cluster *clusterv1beta1.Cluster + MachinePool *clusterv1beta1.MachinePool GCPManagedCluster *infrav1exp.GCPManagedCluster GCPManagedControlPlane *infrav1exp.GCPManagedControlPlane GCPManagedMachinePool *infrav1exp.GCPManagedMachinePool @@ -84,7 +84,7 @@ func NewManagedMachinePoolScope(ctx context.Context, params ManagedMachinePoolSc params.InstanceGroupManagersClient = instanceGroupManagersClient } - helper, err := patch.NewHelper(params.GCPManagedMachinePool, params.Client) + helper, err := v1beta1patch.NewHelper(params.GCPManagedMachinePool, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } @@ -104,10 +104,10 @@ func NewManagedMachinePoolScope(ctx context.Context, params ManagedMachinePoolSc // ManagedMachinePoolScope defines the basic context for an actuator to operate upon. type ManagedMachinePoolScope struct { client client.Client - patchHelper *patch.Helper + patchHelper *v1beta1patch.Helper - Cluster *clusterv1.Cluster - MachinePool *clusterv1.MachinePool + Cluster *clusterv1beta1.Cluster + MachinePool *clusterv1beta1.MachinePool GCPManagedCluster *infrav1exp.GCPManagedCluster GCPManagedControlPlane *infrav1exp.GCPManagedControlPlane GCPManagedMachinePool *infrav1exp.GCPManagedMachinePool @@ -120,7 +120,7 @@ func (s *ManagedMachinePoolScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.GCPManagedMachinePool, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolCreatingCondition, infrav1exp.GKEMachinePoolUpdatingCondition, @@ -136,7 +136,7 @@ func (s *ManagedMachinePoolScope) Close() error { } // ConditionSetter return a condition setter (which is GCPManagedMachinePool itself). 
-func (s *ManagedMachinePoolScope) ConditionSetter() conditions.Setter { +func (s *ManagedMachinePoolScope) ConditionSetter() v1beta1conditions.Setter { return s.GCPManagedMachinePool } @@ -166,7 +166,7 @@ func NodePoolResourceLabels(additionalLabels infrav1.Labels, clusterName string) } // ConvertToSdkNodePool converts a node pool to format that is used by GCP SDK. -func ConvertToSdkNodePool(nodePool infrav1exp.GCPManagedMachinePool, machinePool clusterv1.MachinePool, regional bool, clusterName string) *containerpb.NodePool { +func ConvertToSdkNodePool(nodePool infrav1exp.GCPManagedMachinePool, machinePool clusterv1beta1.MachinePool, regional bool, clusterName string) *containerpb.NodePool { replicas := *machinePool.Spec.Replicas if regional { if len(nodePool.Spec.NodeLocations) != 0 { @@ -275,7 +275,7 @@ func ConvertToSdkNodePool(nodePool infrav1exp.GCPManagedMachinePool, machinePool } // ConvertToSdkNodePools converts node pools to format that is used by GCP SDK. -func ConvertToSdkNodePools(nodePools []infrav1exp.GCPManagedMachinePool, machinePools []clusterv1.MachinePool, regional bool, clusterName string) []*containerpb.NodePool { +func ConvertToSdkNodePools(nodePools []infrav1exp.GCPManagedMachinePool, machinePools []clusterv1beta1.MachinePool, regional bool, clusterName string) []*containerpb.NodePool { res := []*containerpb.NodePool{} for i := range nodePools { res = append(res, ConvertToSdkNodePool(nodePools[i], machinePools[i], regional, clusterName)) diff --git a/cloud/scope/managedmachinepool_test.go b/cloud/scope/managedmachinepool_test.go index 8a08e5f25..3eff7504f 100644 --- a/cloud/scope/managedmachinepool_test.go +++ b/cloud/scope/managedmachinepool_test.go @@ -8,12 +8,12 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/cloud" "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" - clusterv1exp "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) var ( TestGCPMMP *v1beta1.GCPManagedMachinePool - TestMP *clusterv1exp.MachinePool + TestMP *clusterv1beta1.MachinePool TestClusterName string ) @@ -36,8 +36,8 @@ var _ = Describe("GCPManagedMachinePool Scope", func() { }, }, } - TestMP = &clusterv1exp.MachinePool{ - Spec: clusterv1exp.MachinePoolSpec{ + TestMP = &clusterv1beta1.MachinePool{ + Spec: clusterv1beta1.MachinePoolSpec{ Replicas: &replicas, }, } diff --git a/cloud/services/compute/firewalls/reconcile_test.go b/cloud/services/compute/firewalls/reconcile_test.go index aa1b080af..2e55747a0 100644 --- a/cloud/services/compute/firewalls/reconcile_test.go +++ b/cloud/services/compute/firewalls/reconcile_test.go @@ -32,21 +32,21 @@ import ( "k8s.io/utils/ptr" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func init() { - _ = clusterv1.AddToScheme(scheme.Scheme) + _ = clusterv1beta1.AddToScheme(scheme.Scheme) _ = infrav1.AddToScheme(scheme.Scheme) } -var fakeCluster = &clusterv1.Cluster{ +var fakeCluster = &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", Namespace: "default", }, - Spec: clusterv1.ClusterSpec{}, + Spec: clusterv1beta1.ClusterSpec{}, } var fakeGCPCluster = &infrav1.GCPCluster{ diff --git a/cloud/services/compute/instances/reconcile_test.go b/cloud/services/compute/instances/reconcile_test.go index 
bb5404134..d27d0d220 100644 --- a/cloud/services/compute/instances/reconcile_test.go +++ b/cloud/services/compute/instances/reconcile_test.go @@ -35,12 +35,12 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func init() { - _ = clusterv1.AddToScheme(scheme.Scheme) + _ = clusterv1beta1.AddToScheme(scheme.Scheme) _ = infrav1.AddToScheme(scheme.Scheme) } @@ -54,21 +54,21 @@ var fakeBootstrapSecret = &corev1.Secret{ }, } -var fakeCluster = &clusterv1.Cluster{ +var fakeCluster = &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", Namespace: "default", }, - Spec: clusterv1.ClusterSpec{}, + Spec: clusterv1beta1.ClusterSpec{}, } -var fakeMachine = &clusterv1.Machine{ +var fakeMachine = &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "my-machine", Namespace: "default", }, - Spec: clusterv1.MachineSpec{ - Bootstrap: clusterv1.Bootstrap{ + Spec: clusterv1beta1.MachineSpec{ + Bootstrap: clusterv1beta1.Bootstrap{ DataSecretName: ptr.To[string]("my-cluster-bootstrap"), }, FailureDomain: ptr.To[string]("us-central1-c"), @@ -76,13 +76,13 @@ var fakeMachine = &clusterv1.Machine{ }, } -var fakeMachineWithOutFailureDomain = &clusterv1.Machine{ +var fakeMachineWithOutFailureDomain = &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "my-machine", Namespace: "default", }, - Spec: clusterv1.MachineSpec{ - Bootstrap: clusterv1.Bootstrap{ + Spec: clusterv1beta1.MachineSpec{ + Bootstrap: clusterv1beta1.Bootstrap{ DataSecretName: ptr.To[string]("my-cluster-bootstrap"), }, Version: ptr.To[string]("v1.19.11"), @@ -99,10 +99,10 @@ var fakeGCPClusterWithOutFailureDomain = &infrav1.GCPCluster{ Region: "us-central1", }, Status: infrav1.GCPClusterStatus{ - FailureDomains: clusterv1.FailureDomains{ - "us-central1-a": clusterv1.FailureDomainSpec{ControlPlane: true}, - "us-central1-b": clusterv1.FailureDomainSpec{ControlPlane: true}, - "us-central1-c": clusterv1.FailureDomainSpec{ControlPlane: true}, + FailureDomains: clusterv1beta1.FailureDomains{ + "us-central1-a": clusterv1beta1.FailureDomainSpec{ControlPlane: true}, + "us-central1-b": clusterv1beta1.FailureDomainSpec{ControlPlane: true}, + "us-central1-c": clusterv1beta1.FailureDomainSpec{ControlPlane: true}, }, }, } diff --git a/cloud/services/compute/loadbalancers/reconcile_test.go b/cloud/services/compute/loadbalancers/reconcile_test.go index ae5db760c..909a18a50 100644 --- a/cloud/services/compute/loadbalancers/reconcile_test.go +++ b/cloud/services/compute/loadbalancers/reconcile_test.go @@ -31,14 +31,14 @@ import ( "k8s.io/utils/ptr" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) var lbTypeInternal = infrav1.Internal func init() { - _ = clusterv1.AddToScheme(scheme.Scheme) + _ = clusterv1beta1.AddToScheme(scheme.Scheme) _ = infrav1.AddToScheme(scheme.Scheme) } @@ -47,12 +47,12 @@ func getBaseClusterScope() (*scope.ClusterScope, error) { WithScheme(scheme.Scheme). 
Build() - fakeCluster := &clusterv1.Cluster{ + fakeCluster := &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", Namespace: "default", }, - Spec: clusterv1.ClusterSpec{}, + Spec: clusterv1beta1.ClusterSpec{}, } fakeGCPCluster := &infrav1.GCPCluster{ @@ -75,8 +75,8 @@ func getBaseClusterScope() (*scope.ClusterScope, error) { }, }, Status: infrav1.GCPClusterStatus{ - FailureDomains: clusterv1.FailureDomains{ - "us-central1-a": clusterv1.FailureDomainSpec{ControlPlane: true}, + FailureDomains: clusterv1beta1.FailureDomains{ + "us-central1-a": clusterv1beta1.FailureDomainSpec{ControlPlane: true}, }, }, } @@ -124,7 +124,7 @@ func getBaseClusterScopeWithPortSet() (*scope.ClusterScope, error) { } port := int32(6443) - clusterScope.Cluster.Spec.ClusterNetwork = &clusterv1.ClusterNetwork{ + clusterScope.Cluster.Spec.ClusterNetwork = &clusterv1beta1.ClusterNetwork{ APIServerPort: &port, } return clusterScope, nil diff --git a/cloud/services/compute/networks/reconcile_test.go b/cloud/services/compute/networks/reconcile_test.go index e0bcde88b..827252137 100644 --- a/cloud/services/compute/networks/reconcile_test.go +++ b/cloud/services/compute/networks/reconcile_test.go @@ -31,21 +31,21 @@ import ( "k8s.io/utils/ptr" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func init() { - _ = clusterv1.AddToScheme(scheme.Scheme) + _ = clusterv1beta1.AddToScheme(scheme.Scheme) _ = infrav1.AddToScheme(scheme.Scheme) } -var fakeCluster = &clusterv1.Cluster{ +var fakeCluster = &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", Namespace: "default", }, - Spec: clusterv1.ClusterSpec{}, + Spec: clusterv1beta1.ClusterSpec{}, } var fakeGCPCluster = &infrav1.GCPCluster{ diff --git a/cloud/services/compute/subnets/reconcile_test.go b/cloud/services/compute/subnets/reconcile_test.go index 3d8030bf6..ec7abb00e 100644 --- a/cloud/services/compute/subnets/reconcile_test.go +++ b/cloud/services/compute/subnets/reconcile_test.go @@ -33,21 +33,21 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func init() { - _ = clusterv1.AddToScheme(scheme.Scheme) + _ = clusterv1beta1.AddToScheme(scheme.Scheme) _ = infrav1.AddToScheme(scheme.Scheme) } -var fakeCluster = &clusterv1.Cluster{ +var fakeCluster = &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", Namespace: "default", }, - Spec: clusterv1.ClusterSpec{}, + Spec: clusterv1beta1.ClusterSpec{}, } var fakeGCPCluster = &infrav1.GCPCluster{ diff --git a/cloud/services/container/clusters/reconcile.go b/cloud/services/container/clusters/reconcile.go index 8b1bdf577..3adcc40d7 100644 --- a/cloud/services/container/clusters/reconcile.go +++ b/cloud/services/container/clusters/reconcile.go @@ -33,8 +33,8 @@ import ( "google.golang.org/grpc/codes" infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1conditions 
"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/log" ) @@ -48,7 +48,7 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { if err != nil { s.scope.GCPManagedControlPlane.Status.Initialized = false s.scope.GCPManagedControlPlane.Status.Ready = false - conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, "describing cluster: %v", err) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "describing cluster: %v", err) return ctrl.Result{}, err } if cluster == nil { @@ -58,40 +58,40 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { nodePools, _, err := s.scope.GetAllNodePools(ctx) if err != nil { - conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, "fetching node pools: %v", err) - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, "fetching node pools: %v", err) - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, "fetching node pools: %v", err) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "fetching node pools: %v", err) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "fetching node pools: %v", err) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "fetching node pools: %v", err) return ctrl.Result{}, err } if s.scope.IsAutopilotCluster() { if len(nodePools) > 0 { log.Error(ErrAutopilotClusterMachinePoolsNotAllowed, fmt.Sprintf("%d machine pools defined", len(nodePools))) - conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, 
infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{}, ErrAutopilotClusterMachinePoolsNotAllowed } } else { if len(nodePools) == 0 { log.Info("At least 1 node pool is required to create GKE cluster with autopilot disabled") - conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil } } if err = s.createCluster(ctx, &log); err != nil { log.Error(err, "failed creating cluster") - conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, "creating cluster: %v", err) - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, "creating cluster: %v", err) - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, "creating cluster: %v", err) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "creating cluster: %v", err) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "creating cluster: %v", err) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "creating cluster: %v", err) return ctrl.Result{}, err } log.Info("Cluster created provisioning in progress") - conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneCreatingReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneCreatingReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEControlPlaneCreatingReason, clusterv1beta1.ConditionSeverityInfo, "") + 
v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneCreatingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition) return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil } @@ -103,23 +103,23 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { switch cluster.GetStatus() { case containerpb.Cluster_PROVISIONING: log.Info("Cluster provisioning in progress") - conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneCreatingReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneCreatingReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEControlPlaneCreatingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneCreatingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition) s.scope.GCPManagedControlPlane.Status.Initialized = false s.scope.GCPManagedControlPlane.Status.Ready = false return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil case containerpb.Cluster_RECONCILING: log.Info("Cluster reconciling in progress") - conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneUpdatingCondition) + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneUpdatingCondition) s.scope.GCPManagedControlPlane.Status.Initialized = true s.scope.GCPManagedControlPlane.Status.Ready = true return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil case containerpb.Cluster_STOPPING: log.Info("Cluster stopping in progress") - conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition) s.scope.GCPManagedControlPlane.Status.Initialized = false s.scope.GCPManagedControlPlane.Status.Ready = false return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil @@ -129,7 +129,7 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { msg = cluster.GetConditions()[0].GetMessage() } log.Error(errors.New("Cluster in error/degraded state"), msg, "name", s.scope.ClusterName()) - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, 
infrav1exp.GKEControlPlaneErrorReason, clusterv1.ConditionSeverityError, "") + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneErrorReason, clusterv1beta1.ConditionSeverityError, "") s.scope.GCPManagedControlPlane.Status.Ready = false s.scope.GCPManagedControlPlane.Status.Initialized = false return ctrl.Result{}, nil @@ -149,12 +149,12 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { return ctrl.Result{}, err } log.Info("Cluster updating in progress") - conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneUpdatingCondition) + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneUpdatingCondition) s.scope.GCPManagedControlPlane.Status.Initialized = true s.scope.GCPManagedControlPlane.Status.Ready = true return ctrl.Result{}, nil } - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneUpdatingCondition, infrav1exp.GKEControlPlaneUpdatedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneUpdatingCondition, infrav1exp.GKEControlPlaneUpdatedReason, clusterv1beta1.ConditionSeverityInfo, "") // Reconcile kubeconfig err = s.reconcileKubeconfig(ctx, cluster, &log) @@ -169,9 +169,9 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { } s.scope.SetEndpoint(cluster.GetEndpoint()) - conditions.MarkTrue(s.scope.ConditionSetter(), clusterv1.ReadyCondition) - conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition) - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneCreatedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition) + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneCreatedReason, clusterv1beta1.ConditionSeverityInfo, "") s.scope.GCPManagedControlPlane.Status.Ready = true s.scope.GCPManagedControlPlane.Status.Initialized = true @@ -191,7 +191,7 @@ func (s *Service) Delete(ctx context.Context) (ctrl.Result, error) { } if cluster == nil { log.Info("Cluster already deleted") - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition, infrav1exp.GKEControlPlaneDeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition, infrav1exp.GKEControlPlaneDeletedReason, clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } @@ -204,23 +204,23 @@ func (s *Service) Delete(ctx context.Context) (ctrl.Result, error) { return ctrl.Result{}, nil case containerpb.Cluster_STOPPING: log.Info("Cluster stopping in progress") - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition) return 
ctrl.Result{}, nil default: break } if err = s.deleteCluster(ctx, &log); err != nil { - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, "deleting cluster: %v", err) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "deleting cluster: %v", err) return ctrl.Result{}, err } log.Info("Cluster deleting in progress") s.scope.GCPManagedControlPlane.Status.Initialized = false s.scope.GCPManagedControlPlane.Status.Ready = false - conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition) return ctrl.Result{}, nil } diff --git a/cloud/services/container/nodepools/reconcile.go b/cloud/services/container/nodepools/reconcile.go index cd24f91e2..52f42d31a 100644 --- a/cloud/services/container/nodepools/reconcile.go +++ b/cloud/services/container/nodepools/reconcile.go @@ -38,8 +38,8 @@ import ( "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/shared" infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/log" ) @@ -47,7 +47,7 @@ import ( // setReadyStatusFromConditions updates the GCPManagedMachinePool's ready status based on its conditions. 
func (s *Service) setReadyStatusFromConditions() { machinePool := s.scope.GCPManagedMachinePool - if conditions.IsTrue(machinePool, clusterv1.ReadyCondition) || conditions.IsTrue(machinePool, infrav1exp.GKEMachinePoolUpdatingCondition) { + if v1beta1conditions.IsTrue(machinePool, clusterv1beta1.ReadyCondition) || v1beta1conditions.IsTrue(machinePool, infrav1exp.GKEMachinePoolUpdatingCondition) { s.scope.GCPManagedMachinePool.Status.Ready = true return } @@ -65,28 +65,28 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { nodePool, err := s.describeNodePool(ctx, &log) if err != nil { - conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1.ConditionSeverityError, "reading node pool: %v", err) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "reading node pool: %v", err) return ctrl.Result{}, err } if nodePool == nil { log.Info("Node pool not found, creating", "cluster", s.scope.Cluster.Name) if err = s.createNodePool(ctx, &log); err != nil { - conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1.ConditionSeverityError, "creating node pool: %v", err) - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1.ConditionSeverityError, "creating node pool: %v", err) - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1.ConditionSeverityError, "creating node pool: %v", err) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "creating node pool: %v", err) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "creating node pool: %v", err) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "creating node pool: %v", err) return ctrl.Result{}, err } log.Info("Node pool provisioning in progress") - conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEMachinePoolCreatingReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolCreatingReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEMachinePoolCreatingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolCreatingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition) return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil } log.V(2).Info("Node pool found", "cluster", s.scope.Cluster.Name, "nodepool", 
nodePool.GetName()) instances, err := s.getInstances(ctx, nodePool) if err != nil { - conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1.ConditionSeverityError, "reading instances: %v", err) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "reading instances: %v", err) return ctrl.Result{}, err } providerIDList := []string{} @@ -95,7 +95,7 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { providerID, err := providerid.NewFromResourceURL(instance.GetInstance()) if err != nil { log.Error(err, "parsing instance url", "url", instance.GetInstance()) - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolErrorReason, clusterv1.ConditionSeverityError, "") + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolErrorReason, clusterv1beta1.ConditionSeverityError, "") return ctrl.Result{}, err } providerIDList = append(providerIDList, providerID.String()) @@ -108,21 +108,21 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { case containerpb.NodePool_PROVISIONING: // node pool is creating log.Info("Node pool provisioning in progress") - conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEMachinePoolCreatingReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolCreatingReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEMachinePoolCreatingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolCreatingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition) return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil case containerpb.NodePool_RECONCILING: // node pool is updating/reconciling log.Info("Node pool reconciling in progress") - conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolUpdatingCondition) + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolUpdatingCondition) return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil case containerpb.NodePool_STOPPING: // node pool is deleting log.Info("Node pool stopping in progress") - conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEMachinePoolDeletingReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolDeletingReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolDeletingCondition) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEMachinePoolDeletingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, 
infrav1exp.GKEMachinePoolDeletingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolDeletingCondition) return ctrl.Result{}, nil case containerpb.NodePool_ERROR, containerpb.NodePool_RUNNING_WITH_ERROR: // node pool is in error or degraded state @@ -131,13 +131,13 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { msg = nodePool.GetConditions()[0].GetMessage() } log.Error(errors.New("Node pool in error/degraded state"), msg, "name", s.scope.GCPManagedMachinePool.Name) - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolErrorReason, clusterv1.ConditionSeverityError, "") + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolErrorReason, clusterv1beta1.ConditionSeverityError, "") return ctrl.Result{}, nil case containerpb.NodePool_RUNNING: // node pool is ready and running - conditions.MarkTrue(s.scope.ConditionSetter(), clusterv1.ReadyCondition) - conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition) - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition, infrav1exp.GKEMachinePoolCreatedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition) + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition, infrav1exp.GKEMachinePoolCreatedReason, clusterv1beta1.ConditionSeverityInfo, "") log.Info("Node pool running") default: log.Error(errors.New("Unhandled node pool status"), fmt.Sprintf("Unhandled node pool status %s", nodePool.GetStatus()), "name", s.scope.GCPManagedMachinePool.Name) @@ -153,7 +153,7 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { } log.Info("Node pool config updating in progress") s.scope.GCPManagedMachinePool.Status.Ready = true - conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolUpdatingCondition) + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolUpdatingCondition) return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil } @@ -165,7 +165,7 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { return ctrl.Result{}, err } log.Info("Node pool auto scaling updating in progress") - conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolUpdatingCondition) + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolUpdatingCondition) return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil } @@ -177,18 +177,18 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) { return ctrl.Result{}, err } log.Info("Node pool size updating in progress") - conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolUpdatingCondition) + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolUpdatingCondition) return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil } - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolUpdatingCondition, infrav1exp.GKEMachinePoolUpdatedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolUpdatingCondition, 
infrav1exp.GKEMachinePoolUpdatedReason, clusterv1beta1.ConditionSeverityInfo, "") s.scope.SetReplicas(int32(len(s.scope.GCPManagedMachinePool.Spec.ProviderIDList))) log.Info("Node pool reconciled") s.scope.GCPManagedMachinePool.Status.Ready = true - conditions.MarkTrue(s.scope.ConditionSetter(), clusterv1.ReadyCondition) - conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition) - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition, infrav1exp.GKEMachinePoolCreatedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition) + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition, infrav1exp.GKEMachinePoolCreatedReason, clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } @@ -206,7 +206,7 @@ func (s *Service) Delete(ctx context.Context) (ctrl.Result, error) { } if nodePool == nil { log.Info("Node pool already deleted") - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolDeletingCondition, infrav1exp.GKEMachinePoolDeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolDeletingCondition, infrav1exp.GKEMachinePoolDeletedReason, clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{}, err } @@ -219,21 +219,21 @@ func (s *Service) Delete(ctx context.Context) (ctrl.Result, error) { return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil case containerpb.NodePool_STOPPING: log.Info("Node pool stopping in progress") - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolDeletingReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolDeletingCondition) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolDeletingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolDeletingCondition) return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil default: break } if err = s.deleteNodePool(ctx); err != nil { - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolDeletingCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1.ConditionSeverityError, "deleting node pool: %v", err) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolDeletingCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "deleting node pool: %v", err) return ctrl.Result{}, err } log.Info("Node pool deleting in progress") - conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEMachinePoolDeletingReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolDeletingReason, clusterv1.ConditionSeverityInfo, "") - conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolDeletingCondition) + v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEMachinePoolDeletingReason, clusterv1beta1.ConditionSeverityInfo, "") + 
v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolDeletingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolDeletingCondition) return ctrl.Result{}, nil } diff --git a/cloud/services/shared/machinepool.go b/cloud/services/shared/machinepool.go index 33a7eeb96..87afa8aec 100644 --- a/cloud/services/shared/machinepool.go +++ b/cloud/services/shared/machinepool.go @@ -21,14 +21,14 @@ import ( "fmt" "strings" - clusterv1exp "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/cloud" infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" ) // ManagedMachinePoolPreflightCheck will perform checks against the machine pool before its created. -func ManagedMachinePoolPreflightCheck(managedPool *infrav1exp.GCPManagedMachinePool, machinePool *clusterv1exp.MachinePool, location string) error { +func ManagedMachinePoolPreflightCheck(managedPool *infrav1exp.GCPManagedMachinePool, machinePool *clusterv1beta1.MachinePool, location string) error { if machinePool.Spec.Template.Spec.InfrastructureRef.Name != managedPool.Name { return fmt.Errorf("expect machinepool infraref (%s) to match managed machine pool name (%s)", machinePool.Spec.Template.Spec.InfrastructureRef.Name, managedPool.Name) } @@ -49,7 +49,7 @@ func ManagedMachinePoolPreflightCheck(managedPool *infrav1exp.GCPManagedMachineP } // ManagedMachinePoolsPreflightCheck will perform checks against a slice of machine pool before they are created. -func ManagedMachinePoolsPreflightCheck(managedPools []infrav1exp.GCPManagedMachinePool, machinePools []clusterv1exp.MachinePool, location string) error { +func ManagedMachinePoolsPreflightCheck(managedPools []infrav1exp.GCPManagedMachinePool, machinePools []clusterv1beta1.MachinePool, location string) error { if len(machinePools) != len(managedPools) { return errors.New("each machinepool must have a matching gcpmanagedmachinepool") } diff --git a/controllers/gcpcluster_controller.go b/controllers/gcpcluster_controller.go index 3d6e3962c..6b8674c97 100644 --- a/controllers/gcpcluster_controller.go +++ b/controllers/gcpcluster_controller.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute/subnets" "sigs.k8s.io/cluster-api-provider-gcp/pkg/capiutils" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/predicates" @@ -75,7 +75,7 @@ func (r *GCPClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma clusterToInfraFn := util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind("GCPCluster"), mgr.GetClient(), &infrav1.GCPCluster{}) if err = c.Watch( - source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, + source.Kind[client.Object](mgr.GetCache(), &clusterv1beta1.Cluster{}, handler.EnqueueRequestsFromMapFunc(func(mapCtx context.Context, o client.Object) []reconcile.Request { requests := clusterToInfraFn(mapCtx, o) if requests == nil { @@ -179,18 +179,18 @@ func (r *GCPClusterReconciler) reconcile(ctx context.Context, clusterScope *scop return ctrl.Result{}, err } - failureDomains := make(clusterv1.FailureDomains, len(zones)) + failureDomains := 
make(clusterv1beta1.FailureDomains, len(zones)) for _, zone := range zones { if len(clusterScope.GCPCluster.Spec.FailureDomains) > 0 { for _, fd := range clusterScope.GCPCluster.Spec.FailureDomains { if fd == zone.Name { - failureDomains[zone.Name] = clusterv1.FailureDomainSpec{ + failureDomains[zone.Name] = clusterv1beta1.FailureDomainSpec{ ControlPlane: true, } } } } else { - failureDomains[zone.Name] = clusterv1.FailureDomainSpec{ + failureDomains[zone.Name] = clusterv1beta1.FailureDomainSpec{ ControlPlane: true, } } diff --git a/controllers/gcpmachine_controller.go b/controllers/gcpmachine_controller.go index 87c4f582c..f56d8b72b 100644 --- a/controllers/gcpmachine_controller.go +++ b/controllers/gcpmachine_controller.go @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute/instances" "sigs.k8s.io/cluster-api-provider-gcp/pkg/capiutils" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/predicates" "sigs.k8s.io/cluster-api/util/record" @@ -61,7 +61,7 @@ func (r *GCPMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma For(&infrav1.GCPMachine{}). WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), ctrl.LoggerFrom(ctx), r.WatchFilterValue)). Watches( - &clusterv1.Machine{}, + &clusterv1beta1.Machine{}, handler.EnqueueRequestsFromMapFunc(util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("GCPMachine"))), ). Watches( @@ -80,7 +80,7 @@ func (r *GCPMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma // Add a watch on clusterv1.Cluster object for unpause & ready notifications. 
if err := c.Watch( - source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, + source.Kind[client.Object](mgr.GetCache(), &clusterv1beta1.Cluster{}, handler.EnqueueRequestsFromMapFunc(clusterToObjectFunc), capiutils.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log), )); err != nil { @@ -112,8 +112,8 @@ func (r *GCPMachineReconciler) GCPClusterToGCPMachines(ctx context.Context) hand return result } - labels := map[string]string{clusterv1.ClusterNameLabel: cluster.Name} - machineList := &clusterv1.MachineList{} + labels := map[string]string{clusterv1beta1.ClusterNameLabel: cluster.Name} + machineList := &clusterv1beta1.MachineList{} if err := r.List(mapCtx, machineList, client.InNamespace(c.Namespace), client.MatchingLabels(labels)); err != nil { log.Error(err, "failed to list Machines") return nil diff --git a/controllers/gcpmachine_controller_unit_test.go b/controllers/gcpmachine_controller_unit_test.go index 055134660..f5ca27f88 100644 --- a/controllers/gcpmachine_controller_unit_test.go +++ b/controllers/gcpmachine_controller_unit_test.go @@ -25,16 +25,16 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) -func newMachine(clusterName, machineName string) *clusterv1.Machine { - return &clusterv1.Machine{ +func newMachine(clusterName, machineName string) *clusterv1beta1.Machine { + return &clusterv1beta1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - clusterv1.ClusterNameLabel: clusterName, + clusterv1beta1.ClusterNameLabel: clusterName, }, Name: machineName, Namespace: "default", @@ -42,7 +42,7 @@ func newMachine(clusterName, machineName string) *clusterv1.Machine { } } -func newMachineWithInfrastructureRef(clusterName, machineName string) *clusterv1.Machine { +func newMachineWithInfrastructureRef(clusterName, machineName string) *clusterv1beta1.Machine { m := newMachine(clusterName, machineName) m.Spec.InfrastructureRef = corev1.ObjectReference{ Kind: "GCPMachine", @@ -54,8 +54,8 @@ func newMachineWithInfrastructureRef(clusterName, machineName string) *clusterv1 return m } -func newCluster(name string) *clusterv1.Cluster { - return &clusterv1.Cluster{ +func newCluster(name string) *clusterv1beta1.Cluster { + return &clusterv1beta1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: "default", @@ -70,7 +70,7 @@ func TestGCPMachineReconciler_GCPClusterToGCPMachines(t *testing.T) { scheme := runtime.NewScheme() g.Expect(infrav1.AddToScheme(scheme)).To(Succeed()) - g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) + g.Expect(clusterv1beta1.AddToScheme(scheme)).To(Succeed()) clusterName := "my-cluster" initObjects := []runtime.Object{ @@ -96,7 +96,7 @@ func TestGCPMachineReconciler_GCPClusterToGCPMachines(t *testing.T) { { Name: clusterName, Kind: "Cluster", - APIVersion: clusterv1.GroupVersion.String(), + APIVersion: clusterv1beta1.GroupVersion.String(), }, }, }, diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 9b3c1bf31..da5d83e0e 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/klog/v2" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 
"sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -69,7 +69,7 @@ var _ = BeforeSuite(func() { Expect(err).NotTo(HaveOccurred()) Expect(cfg).ToNot(BeNil()) - Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) + Expect(clusterv1beta1.AddToScheme(scheme.Scheme)).To(Succeed()) Expect(infrav1.AddToScheme(scheme.Scheme)).To(Succeed()) // +kubebuilder:scaffold:scheme diff --git a/exp/api/v1beta1/conditions_consts.go b/exp/api/v1beta1/conditions_consts.go index b651dde77..bc806228b 100644 --- a/exp/api/v1beta1/conditions_consts.go +++ b/exp/api/v1beta1/conditions_consts.go @@ -16,17 +16,17 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +import clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" const ( // GKEControlPlaneReadyCondition condition reports on the successful reconciliation of GKE control plane. - GKEControlPlaneReadyCondition clusterv1.ConditionType = "GKEControlPlaneReady" + GKEControlPlaneReadyCondition clusterv1beta1.ConditionType = "GKEControlPlaneReady" // GKEControlPlaneCreatingCondition condition reports on whether the GKE control plane is creating. - GKEControlPlaneCreatingCondition clusterv1.ConditionType = "GKEControlPlaneCreating" + GKEControlPlaneCreatingCondition clusterv1beta1.ConditionType = "GKEControlPlaneCreating" // GKEControlPlaneUpdatingCondition condition reports on whether the GKE control plane is updating. - GKEControlPlaneUpdatingCondition clusterv1.ConditionType = "GKEControlPlaneUpdating" + GKEControlPlaneUpdatingCondition clusterv1beta1.ConditionType = "GKEControlPlaneUpdating" // GKEControlPlaneDeletingCondition condition reports on whether the GKE control plane is deleting. - GKEControlPlaneDeletingCondition clusterv1.ConditionType = "GKEControlPlaneDeleting" + GKEControlPlaneDeletingCondition clusterv1beta1.ConditionType = "GKEControlPlaneDeleting" // GKEControlPlaneCreatingReason used to report GKE control plane being created. GKEControlPlaneCreatingReason = "GKEControlPlaneCreating" @@ -46,13 +46,13 @@ const ( GKEControlPlaneRequiresAtLeastOneNodePoolReason = "GKEControlPlaneRequiresAtLeastOneNodePool" // GKEMachinePoolReadyCondition condition reports on the successful reconciliation of GKE node pool. - GKEMachinePoolReadyCondition clusterv1.ConditionType = "GKEMachinePoolReady" + GKEMachinePoolReadyCondition clusterv1beta1.ConditionType = "GKEMachinePoolReady" // GKEMachinePoolCreatingCondition condition reports on whether the GKE node pool is creating. - GKEMachinePoolCreatingCondition clusterv1.ConditionType = "GKEMachinePoolCreating" + GKEMachinePoolCreatingCondition clusterv1beta1.ConditionType = "GKEMachinePoolCreating" // GKEMachinePoolUpdatingCondition condition reports on whether the GKE node pool is updating. - GKEMachinePoolUpdatingCondition clusterv1.ConditionType = "GKEMachinePoolUpdating" + GKEMachinePoolUpdatingCondition clusterv1beta1.ConditionType = "GKEMachinePoolUpdating" // GKEMachinePoolDeletingCondition condition reports on whether the GKE node pool is deleting. - GKEMachinePoolDeletingCondition clusterv1.ConditionType = "GKEMachinePoolDeleting" + GKEMachinePoolDeletingCondition clusterv1beta1.ConditionType = "GKEMachinePoolDeleting" // WaitingForGKEControlPlaneReason used when the machine pool is waiting for GKE control plane infrastructure to be ready before proceeding. 
WaitingForGKEControlPlaneReason = "WaitingForGKEControlPlane" diff --git a/exp/api/v1beta1/gcpmanagedcluster_types.go b/exp/api/v1beta1/gcpmanagedcluster_types.go index 21d02cee0..038938447 100644 --- a/exp/api/v1beta1/gcpmanagedcluster_types.go +++ b/exp/api/v1beta1/gcpmanagedcluster_types.go @@ -19,7 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -38,7 +38,7 @@ type GCPManagedClusterSpec struct { // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. // +optional - ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` + ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` // NetworkSpec encapsulates all things related to the GCP network. // +optional @@ -72,11 +72,11 @@ type GCPManagedClusterSpec struct { // GCPManagedClusterStatus defines the observed state of GCPManagedCluster. type GCPManagedClusterStatus struct { - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` - Network infrav1.Network `json:"network,omitempty"` - Ready bool `json:"ready"` + FailureDomains clusterv1beta1.FailureDomains `json:"failureDomains,omitempty"` + Network infrav1.Network `json:"network,omitempty"` + Ready bool `json:"ready"` // Conditions specifies the conditions for the managed control plane - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true diff --git a/exp/api/v1beta1/gcpmanagedcontrolplane_types.go b/exp/api/v1beta1/gcpmanagedcontrolplane_types.go index 597697ff5..a800ca348 100644 --- a/exp/api/v1beta1/gcpmanagedcontrolplane_types.go +++ b/exp/api/v1beta1/gcpmanagedcontrolplane_types.go @@ -22,7 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/strings/slices" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -168,7 +168,7 @@ type GCPManagedControlPlaneSpec struct { // Endpoint represents the endpoint used to communicate with the control plane. // +optional - Endpoint clusterv1.APIEndpoint `json:"endpoint"` + Endpoint clusterv1beta1.APIEndpoint `json:"endpoint"` } // GCPManagedControlPlaneStatus defines the observed state of GCPManagedControlPlane. @@ -184,7 +184,7 @@ type GCPManagedControlPlaneStatus struct { Initialized bool `json:"initialized,omitempty"` // Conditions specifies the conditions for the managed control plane - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // CurrentVersion shows the current version of the GKE control plane. // @@ -301,12 +301,12 @@ func (m MonitoringService) String() string { } // GetConditions returns the control planes conditions. -func (r *GCPManagedControlPlane) GetConditions() clusterv1.Conditions { +func (r *GCPManagedControlPlane) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } // SetConditions sets the status conditions for the GCPManagedControlPlane. 
-func (r *GCPManagedControlPlane) SetConditions(conditions clusterv1.Conditions) { +func (r *GCPManagedControlPlane) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/exp/api/v1beta1/gcpmanagedmachinepool_types.go b/exp/api/v1beta1/gcpmanagedmachinepool_types.go index e7d11ab73..cbeda0fc5 100644 --- a/exp/api/v1beta1/gcpmanagedmachinepool_types.go +++ b/exp/api/v1beta1/gcpmanagedmachinepool_types.go @@ -18,7 +18,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -116,7 +116,7 @@ type GCPManagedMachinePoolStatus struct { // +optional Replicas int32 `json:"replicas"` // Conditions specifies the cpnditions for the managed machine pool - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // InfrastructureMachineKind is the kind of the infrastructure resources behind MachinePool Machines. // +optional InfrastructureMachineKind string `json:"infrastructureMachineKind,omitempty"` @@ -211,12 +211,12 @@ const ( ) // GetConditions returns the machine pool conditions. -func (r *GCPManagedMachinePool) GetConditions() clusterv1.Conditions { +func (r *GCPManagedMachinePool) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } // SetConditions sets the status conditions for the GCPManagedMachinePool. -func (r *GCPManagedMachinePool) SetConditions(conditions clusterv1.Conditions) { +func (r *GCPManagedMachinePool) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/exp/api/v1beta1/types_template.go b/exp/api/v1beta1/types_template.go index eec236398..1e7b46ea1 100644 --- a/exp/api/v1beta1/types_template.go +++ b/exp/api/v1beta1/types_template.go @@ -18,7 +18,7 @@ package v1beta1 import ( infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // GCPManagedControlPlaneTemplateResourceSpec specifies an GCP managed control plane template resource. @@ -45,7 +45,7 @@ type GCPManagedClusterTemplateResourceSpec struct { // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. // +optional - ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` + ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` // NetworkSpec encapsulates all things related to the GCP network. // +optional diff --git a/exp/bootstrap/gke/api/v1beta1/gkeconfig_types.go b/exp/bootstrap/gke/api/v1beta1/gkeconfig_types.go index b1b32c592..cbe7a417c 100644 --- a/exp/bootstrap/gke/api/v1beta1/gkeconfig_types.go +++ b/exp/bootstrap/gke/api/v1beta1/gkeconfig_types.go @@ -18,7 +18,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // GKEConfigSpec defines the desired state of GCP GKE Bootstrap Configuration. @@ -64,7 +64,7 @@ type GKEConfigStatus struct { // Conditions defines current service state of the GKEConfig. 
// +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true diff --git a/exp/bootstrap/gke/controllers/gkeconfig_controller.go b/exp/bootstrap/gke/controllers/gkeconfig_controller.go index c92a6f500..fd22206e8 100644 --- a/exp/bootstrap/gke/controllers/gkeconfig_controller.go +++ b/exp/bootstrap/gke/controllers/gkeconfig_controller.go @@ -34,7 +34,7 @@ import ( infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" bootstrapv1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/bootstrap/gke/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -154,7 +154,7 @@ func (r *GKEConfigReconciler) ManagedMachinePoolToGKEConfigMapFunc(_ context.Con } } -func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*expclusterv1.MachinePool, error) { +func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1beta1.MachinePool, error) { for _, ref := range obj.OwnerReferences { if ref.Kind != "MachinePool" { continue @@ -163,7 +163,7 @@ func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.Object if err != nil { return nil, errors.WithStack(err) } - if gv.Group == expclusterv1.GroupVersion.Group { + if gv.Group == clusterv1beta1.GroupVersion.Group { return getMachinePoolByName(ctx, c, obj.Namespace, ref.Name) } } @@ -171,8 +171,8 @@ func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.Object return nil, nil } -func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*expclusterv1.MachinePool, error) { - m := &expclusterv1.MachinePool{} +func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1beta1.MachinePool, error) { + m := &clusterv1beta1.MachinePool{} key := client.ObjectKey{Name: name, Namespace: namespace} if err := c.Get(ctx, key, m); err != nil { return nil, err diff --git a/exp/controllers/gcpmanagedcluster_controller.go b/exp/controllers/gcpmanagedcluster_controller.go index 98085dd3e..a545febda 100644 --- a/exp/controllers/gcpmanagedcluster_controller.go +++ b/exp/controllers/gcpmanagedcluster_controller.go @@ -34,7 +34,7 @@ import ( infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/pkg/capiutils" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/predicates" "sigs.k8s.io/cluster-api/util/record" @@ -156,7 +156,7 @@ func (r *GCPManagedClusterReconciler) SetupWithManager(ctx context.Context, mgr } if err = c.Watch( - source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, + source.Kind[client.Object](mgr.GetCache(), &clusterv1beta1.Cluster{}, handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1exp.GroupVersion.WithKind("GCPManagedCluster"), mgr.GetClient(), &infrav1exp.GCPManagedCluster{})), predicates.ClusterUnpaused(mgr.GetScheme(), log), predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), log, r.WatchFilterValue), @@ -186,9 +186,9 @@ func (r *GCPManagedClusterReconciler) reconcile(ctx context.Context, clusterScop return err } - failureDomains := make(clusterv1.FailureDomains, 
len(zones)) + failureDomains := make(clusterv1beta1.FailureDomains, len(zones)) for _, zone := range zones { - failureDomains[zone.Name] = clusterv1.FailureDomainSpec{ + failureDomains[zone.Name] = clusterv1beta1.FailureDomainSpec{ ControlPlane: false, } } @@ -315,7 +315,7 @@ func (r *GCPManagedClusterReconciler) dependencyCount(ctx context.Context, clust listOptions := []client.ListOption{ client.InNamespace(clusterNamespace), - client.MatchingLabels(map[string]string{clusterv1.ClusterNameLabel: clusterName}), + client.MatchingLabels(map[string]string{clusterv1beta1.ClusterNameLabel: clusterName}), } managedMachinePools := &infrav1exp.GCPManagedMachinePoolList{} diff --git a/exp/controllers/gcpmanagedcontrolplane_controller.go b/exp/controllers/gcpmanagedcontrolplane_controller.go index 9887fcf21..b6f577863 100644 --- a/exp/controllers/gcpmanagedcontrolplane_controller.go +++ b/exp/controllers/gcpmanagedcontrolplane_controller.go @@ -29,9 +29,9 @@ import ( infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/pkg/capiutils" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/predicates" "sigs.k8s.io/cluster-api/util/record" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -73,7 +73,7 @@ func (r *GCPManagedControlPlaneReconciler) SetupWithManager(ctx context.Context, } if err = c.Watch( - source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, + source.Kind[client.Object](mgr.GetCache(), &clusterv1beta1.Cluster{}, handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, gcpManagedControlPlane.GroupVersionKind(), mgr.GetClient(), &infrav1exp.GCPManagedControlPlane{})), capiutils.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log), )); err != nil { @@ -207,7 +207,7 @@ func (r *GCPManagedControlPlaneReconciler) reconcileDelete(ctx context.Context, } if managedControlPlaneScope.GCPManagedControlPlane != nil && - conditions.Get(managedControlPlaneScope.GCPManagedControlPlane, infrav1exp.GKEControlPlaneDeletingCondition).Reason == infrav1exp.GKEControlPlaneDeletedReason { + v1beta1conditions.Get(managedControlPlaneScope.GCPManagedControlPlane, infrav1exp.GKEControlPlaneDeletingCondition).Reason == infrav1exp.GKEControlPlaneDeletedReason { controllerutil.RemoveFinalizer(managedControlPlaneScope.GCPManagedControlPlane, infrav1exp.ManagedControlPlaneFinalizer) } diff --git a/exp/controllers/gcpmanagedmachinepool_controller.go b/exp/controllers/gcpmanagedmachinepool_controller.go index 99535936b..e29e5d3cd 100644 --- a/exp/controllers/gcpmanagedmachinepool_controller.go +++ b/exp/controllers/gcpmanagedmachinepool_controller.go @@ -31,7 +31,7 @@ import ( "sigs.k8s.io/cluster-api-provider-gcp/cloud" "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/container/nodepools" "sigs.k8s.io/cluster-api-provider-gcp/pkg/capiutils" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/record" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -40,7 +40,7 @@ import ( 
"sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/predicates" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -69,7 +69,7 @@ func GetOwnerClusterKey(obj metav1.ObjectMeta) (*client.ObjectKey, error) { if err != nil { return nil, errors.WithStack(err) } - if gv.Group == clusterv1.GroupVersion.Group { + if gv.Group == clusterv1beta1.GroupVersion.Group { return &client.ObjectKey{ Namespace: obj.Namespace, Name: ref.Name, @@ -81,7 +81,7 @@ func GetOwnerClusterKey(obj metav1.ObjectMeta) (*client.ObjectKey, error) { func machinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind) handler.MapFunc { return func(ctx context.Context, o client.Object) []reconcile.Request { - m, ok := o.(*clusterv1.MachinePool) + m, ok := o.(*clusterv1beta1.MachinePool) if !ok { panic(fmt.Sprintf("Expected a MachinePool but got a %T", o)) } @@ -125,9 +125,9 @@ func managedControlPlaneToManagedMachinePoolMapFunc(c client.Client, gvk schema. return nil } - managedPoolForClusterList := clusterv1.MachinePoolList{} + managedPoolForClusterList := clusterv1beta1.MachinePoolList{} if err := c.List( - ctx, &managedPoolForClusterList, client.InNamespace(clusterKey.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterKey.Name}, + ctx, &managedPoolForClusterList, client.InNamespace(clusterKey.Namespace), client.MatchingLabels{clusterv1beta1.ClusterNameLabel: clusterKey.Name}, ); err != nil { log.Error(err, "couldn't list pools for cluster") return nil @@ -159,7 +159,7 @@ func (r *GCPManagedMachinePoolReconciler) SetupWithManager(ctx context.Context, For(&infrav1exp.GCPManagedMachinePool{}). WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), log, r.WatchFilterValue)). Watches( - &clusterv1.MachinePool{}, + &clusterv1beta1.MachinePool{}, handler.EnqueueRequestsFromMapFunc(machinePoolToInfrastructureMapFunc(gvk)), ). Watches( @@ -178,7 +178,7 @@ func (r *GCPManagedMachinePoolReconciler) SetupWithManager(ctx context.Context, // Add a watch on clusterv1.Cluster object for unpause & ready notifications. if err := c.Watch( - source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, + source.Kind[client.Object](mgr.GetCache(), &clusterv1beta1.Cluster{}, handler.EnqueueRequestsFromMapFunc(clusterToObjectFunc), capiutils.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log), )); err != nil { @@ -189,8 +189,8 @@ func (r *GCPManagedMachinePoolReconciler) SetupWithManager(ctx context.Context, } // getMachinePoolByName finds and return a Machine object using the specified params. -func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.MachinePool, error) { - m := &clusterv1.MachinePool{} +func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1beta1.MachinePool, error) { + m := &clusterv1beta1.MachinePool{} key := client.ObjectKey{Name: name, Namespace: namespace} if err := c.Get(ctx, key, m); err != nil { return nil, err @@ -199,7 +199,7 @@ func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name } // getOwnerMachinePool returns the MachinePool object owning the current resource. 
-func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.MachinePool, error) { +func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1beta1.MachinePool, error) { for _, ref := range obj.OwnerReferences { if ref.Kind != "MachinePool" { continue @@ -208,7 +208,7 @@ func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.Object if err != nil { return nil, errors.WithStack(err) } - if gv.Group == clusterv1.GroupVersion.Group { + if gv.Group == clusterv1beta1.GroupVersion.Group { return getMachinePoolByName(ctx, c, obj.Namespace, ref.Name) } } @@ -286,7 +286,7 @@ func (r *GCPManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctr if !gcpManagedControlPlane.Status.Ready { log.Info("Control plane is not ready yet") - conditions.MarkFalse(gcpManagedMachinePool, infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.WaitingForGKEControlPlaneReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(gcpManagedMachinePool, infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.WaitingForGKEControlPlaneReason, clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } @@ -387,7 +387,7 @@ func (r *GCPManagedMachinePoolReconciler) reconcileDelete(ctx context.Context, m } } - if conditions.Get(managedMachinePoolScope.GCPManagedMachinePool, infrav1exp.GKEMachinePoolDeletingCondition).Reason == infrav1exp.GKEMachinePoolDeletedReason { + if v1beta1conditions.Get(managedMachinePoolScope.GCPManagedMachinePool, infrav1exp.GKEMachinePoolDeletingCondition).Reason == infrav1exp.GKEMachinePoolDeletedReason { controllerutil.RemoveFinalizer(managedMachinePoolScope.GCPManagedMachinePool, infrav1exp.ManagedMachinePoolFinalizer) } diff --git a/main.go b/main.go index 37d782d65..416d87ee5 100644 --- a/main.go +++ b/main.go @@ -41,7 +41,7 @@ import ( "sigs.k8s.io/cluster-api-provider-gcp/feature" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" "sigs.k8s.io/cluster-api-provider-gcp/version" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" capifeature "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util/flags" "sigs.k8s.io/cluster-api/util/record" @@ -63,7 +63,7 @@ func init() { _ = clientgoscheme.AddToScheme(scheme) _ = infrav1beta1.AddToScheme(scheme) - _ = clusterv1.AddToScheme(scheme) + _ = clusterv1beta1.AddToScheme(scheme) _ = infrav1exp.AddToScheme(scheme) _ = gkebootstrapv1exp.AddToScheme(scheme) // +kubebuilder:scaffold:scheme @@ -352,7 +352,7 @@ func initFlags(fs *pflag.FlagSet) { &watchFilterValue, "watch-filter", "", - fmt.Sprintf("Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. If unspecified, the controller watches for all cluster-api objects.", clusterv1.WatchLabel), + fmt.Sprintf("Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. 
If unspecified, the controller watches for all cluster-api objects.", clusterv1beta1.WatchLabel), ) fs.IntVar(&gcpClusterConcurrency, diff --git a/pkg/capiutils/predicates.go b/pkg/capiutils/predicates.go index 608fb34aa..0b242569e 100644 --- a/pkg/capiutils/predicates.go +++ b/pkg/capiutils/predicates.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/cluster-api/util/predicates" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // ClusterUpdateInfraReady returns a predicate that returns true for an update event when a cluster has Status.InfrastructureReady changed from false to true @@ -46,13 +46,13 @@ func ClusterUpdateInfraReady(scheme *runtime.Scheme, logger logr.Logger) predica log = log.WithValues(gvk.Kind, klog.KObj(e.ObjectOld)) } - oldCluster, ok := e.ObjectOld.(*clusterv1.Cluster) + oldCluster, ok := e.ObjectOld.(*clusterv1beta1.Cluster) if !ok { log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) return false } - newCluster := e.ObjectNew.(*clusterv1.Cluster) + newCluster := e.ObjectNew.(*clusterv1beta1.Cluster) if !oldCluster.Status.InfrastructureReady && newCluster.Status.InfrastructureReady { log.V(6).Info("Cluster infrastructure became ready, allowing further processing") @@ -77,13 +77,13 @@ func ClusterPausedTransitions(scheme *runtime.Scheme, logger logr.Logger) predic log = log.WithValues(gvk.Kind, klog.KObj(e.ObjectOld)) } - oldCluster, ok := e.ObjectOld.(*clusterv1.Cluster) + oldCluster, ok := e.ObjectOld.(*clusterv1beta1.Cluster) if !ok { log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) return false } - newCluster := e.ObjectNew.(*clusterv1.Cluster) + newCluster := e.ObjectNew.(*clusterv1beta1.Cluster) if oldCluster.Spec.Paused && !newCluster.Spec.Paused { log.V(6).Info("Cluster unpausing, allowing further processing") diff --git a/pkg/capiutils/utils.go b/pkg/capiutils/utils.go index f1905451a..1617ececf 100644 --- a/pkg/capiutils/utils.go +++ b/pkg/capiutils/utils.go @@ -24,19 +24,19 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - capiv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" ) // IsControlPlaneMachine checks machine is a control plane node. -func IsControlPlaneMachine(machine *capiv1beta1.Machine) bool { - _, ok := machine.Labels[capiv1beta1.MachineControlPlaneLabel] +func IsControlPlaneMachine(machine *clusterv1beta1.Machine) bool { + _, ok := machine.Labels[clusterv1beta1.MachineControlPlaneLabel] return ok } // GetOwnerCluster returns the Cluster object owning the current resource. 
-func GetOwnerCluster(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*capiv1beta1.Cluster, error) { +func GetOwnerCluster(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1beta1.Cluster, error) { for _, ref := range obj.GetOwnerReferences() { if ref.Kind != "Cluster" { continue @@ -45,7 +45,7 @@ func GetOwnerCluster(ctx context.Context, c client.Client, obj metav1.ObjectMeta if err != nil { return nil, errors.WithStack(err) } - if gv.Group == capiv1beta1.GroupVersion.Group { + if gv.Group == clusterv1beta1.GroupVersion.Group { return GetClusterByName(ctx, c, obj.Namespace, ref.Name) } } @@ -53,16 +53,16 @@ func GetOwnerCluster(ctx context.Context, c client.Client, obj metav1.ObjectMeta } // GetClusterFromMetadata returns the Cluster object (if present) using the object metadata. -func GetClusterFromMetadata(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*capiv1beta1.Cluster, error) { - if obj.Labels[capiv1beta1.ClusterNameLabel] == "" { +func GetClusterFromMetadata(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1beta1.Cluster, error) { + if obj.Labels[clusterv1beta1.ClusterNameLabel] == "" { return nil, errors.WithStack(util.ErrNoCluster) } - return GetClusterByName(ctx, c, obj.Namespace, obj.Labels[capiv1beta1.ClusterNameLabel]) + return GetClusterByName(ctx, c, obj.Namespace, obj.Labels[clusterv1beta1.ClusterNameLabel]) } // GetClusterByName finds and return a Cluster object using the specified params. -func GetClusterByName(ctx context.Context, c client.Client, namespace, name string) (*capiv1beta1.Cluster, error) { - cluster := &capiv1beta1.Cluster{} +func GetClusterByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1beta1.Cluster, error) { + cluster := &clusterv1beta1.Cluster{} key := client.ObjectKey{ Namespace: namespace, Name: name, @@ -76,7 +76,7 @@ func GetClusterByName(ctx context.Context, c client.Client, namespace, name stri } // IsPaused returns true if the Cluster is paused or the object has the `paused` annotation. -func IsPaused(cluster *capiv1beta1.Cluster, o metav1.Object) bool { +func IsPaused(cluster *clusterv1beta1.Cluster, o metav1.Object) bool { if cluster.Spec.Paused { return true } @@ -84,13 +84,13 @@ func IsPaused(cluster *capiv1beta1.Cluster, o metav1.Object) bool { } // GetOwnerMachine returns the Machine object owning the current resource. -func GetOwnerMachine(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*capiv1beta1.Machine, error) { +func GetOwnerMachine(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1beta1.Machine, error) { for _, ref := range obj.GetOwnerReferences() { gv, err := schema.ParseGroupVersion(ref.APIVersion) if err != nil { return nil, err } - if ref.Kind == "Machine" && gv.Group == capiv1beta1.GroupVersion.Group { + if ref.Kind == "Machine" && gv.Group == clusterv1beta1.GroupVersion.Group { return GetMachineByName(ctx, c, obj.Namespace, ref.Name) } } @@ -98,8 +98,8 @@ func GetOwnerMachine(ctx context.Context, c client.Client, obj metav1.ObjectMeta } // GetMachineByName finds and return a Machine object using the specified params. 
-func GetMachineByName(ctx context.Context, c client.Client, namespace, name string) (*capiv1beta1.Machine, error) { - m := &capiv1beta1.Machine{} +func GetMachineByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1beta1.Machine, error) { + m := &clusterv1beta1.Machine{} key := client.ObjectKey{Name: name, Namespace: namespace} if err := c.Get(ctx, key, m); err != nil { return nil, err From 7b87bfacdb79a8a4b7b1653a17a4211dcaf0250a Mon Sep 17 00:00:00 2001 From: Christian Schlotter Date: Mon, 10 Nov 2025 15:11:24 +0100 Subject: [PATCH 3/5] Reconcile and use CAPI v1beta2 instead of v1beta1 --- cloud/interfaces.go | 8 ++-- cloud/scope/cluster.go | 37 ++++++++++++------- cloud/scope/machine.go | 27 +++++--------- cloud/scope/machine_test.go | 14 +++---- cloud/scope/managedcluster.go | 31 +++++++++++----- cloud/scope/managedcontrolplane.go | 13 ++++--- cloud/scope/managedmachinepool.go | 19 +++++----- cloud/scope/managedmachinepool_test.go | 8 ++-- .../compute/firewalls/reconcile_test.go | 8 ++-- .../compute/instances/reconcile_test.go | 25 +++++++------ .../compute/loadbalancers/reconcile.go | 6 +-- .../compute/loadbalancers/reconcile_test.go | 12 +++--- .../compute/networks/reconcile_test.go | 8 ++-- .../compute/subnets/reconcile_test.go | 8 ++-- .../services/container/nodepools/reconcile.go | 4 +- cloud/services/shared/machinepool.go | 6 +-- controllers/gcpcluster_controller.go | 5 ++- controllers/gcpmachine_controller.go | 10 ++--- .../gcpmachine_controller_unit_test.go | 28 +++++++------- controllers/suite_test.go | 4 +- .../gke/controllers/gkeconfig_controller.go | 14 +++---- .../gcpmanagedcluster_controller.go | 16 +++++--- .../gcpmanagedcontrolplane_controller.go | 4 +- .../gcpmanagedmachinepool_controller.go | 23 ++++++------ main.go | 6 +-- pkg/capiutils/predicates.go | 18 +++++---- pkg/capiutils/utils.go | 33 +++++++++-------- 27 files changed, 209 insertions(+), 186 deletions(-) diff --git a/cloud/interfaces.go b/cloud/interfaces.go index 14ac7e2db..58aeda69d 100644 --- a/cloud/interfaces.go +++ b/cloud/interfaces.go @@ -19,12 +19,12 @@ package cloud import ( "context" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ctrl "sigs.k8s.io/controller-runtime" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" corev1 "k8s.io/api/core/v1" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" - clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // Cloud alias for cloud.Cloud interface. @@ -61,15 +61,15 @@ type ClusterGetter interface { SkipFirewallRuleCreation() bool Network() *infrav1.Network AdditionalLabels() infrav1.Labels - FailureDomains() clusterv1beta1.FailureDomains - ControlPlaneEndpoint() clusterv1beta1.APIEndpoint + FailureDomains() []string + ControlPlaneEndpoint() clusterv1.APIEndpoint ResourceManagerTags() infrav1.ResourceManagerTags LoadBalancer() infrav1.LoadBalancerSpec } // ClusterSetter is an interface which can set cluster information. type ClusterSetter interface { - SetControlPlaneEndpoint(endpoint clusterv1beta1.APIEndpoint) + SetControlPlaneEndpoint(endpoint clusterv1.APIEndpoint) } // Cluster is an interface which can get and set cluster information. 
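Note on the pattern in the scope changes that follow: the GCPCluster/GCPManagedCluster specs keep storing the CAPI v1beta1 APIEndpoint and FailureDomains map, while the ClusterGetter/ClusterSetter interfaces above now expose the v1beta2 shapes ([]string failure domains, v1beta2 APIEndpoint). A minimal standalone sketch of that conversion, not part of the patch (helper names are illustrative; it assumes the v1beta1 FailureDomains map type and the v1beta2 APIEndpoint with Host and Port fields):

package scope

import (
	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
)

// failureDomainNames flattens the v1beta1 map into the []string form the
// v1beta2-facing ClusterGetter now returns. Map iteration order is random,
// so callers that need determinism (e.g. zone selection) sort the result.
func failureDomainNames(fds clusterv1beta1.FailureDomains) []string {
	names := make([]string, 0, len(fds))
	for name := range fds {
		names = append(names, name)
	}
	return names
}

// toV1Beta1Endpoint copies a v1beta2 endpoint back into the v1beta1 field
// that is still persisted in the provider spec.
func toV1Beta1Endpoint(in clusterv1.APIEndpoint) clusterv1beta1.APIEndpoint {
	return clusterv1beta1.APIEndpoint{Host: in.Host, Port: in.Port}
}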
diff --git a/cloud/scope/cluster.go b/cloud/scope/cluster.go index e52a8fc97..287f09461 100644 --- a/cloud/scope/cluster.go +++ b/cloud/scope/cluster.go @@ -28,6 +28,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/cloud" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -36,7 +37,7 @@ import ( type ClusterScopeParams struct { GCPServices Client client.Client - Cluster *clusterv1beta1.Cluster + Cluster *clusterv1.Cluster GCPCluster *infrav1.GCPCluster } @@ -78,7 +79,7 @@ type ClusterScope struct { client client.Client patchHelper *patch.Helper - Cluster *clusterv1beta1.Cluster + Cluster *clusterv1.Cluster GCPCluster *infrav1.GCPCluster GCPServices } @@ -184,18 +185,25 @@ func (s *ClusterScope) ResourceManagerTags() infrav1.ResourceManagerTags { } // ControlPlaneEndpoint returns the cluster control-plane endpoint. -func (s *ClusterScope) ControlPlaneEndpoint() clusterv1beta1.APIEndpoint { - endpoint := s.GCPCluster.Spec.ControlPlaneEndpoint - endpoint.Port = 443 - if c := s.Cluster.Spec.ClusterNetwork; c != nil { - endpoint.Port = ptr.Deref(c.APIServerPort, 443) +func (s *ClusterScope) ControlPlaneEndpoint() clusterv1.APIEndpoint { + endpoint := clusterv1.APIEndpoint{ + Host: s.GCPCluster.Spec.ControlPlaneEndpoint.Host, + Port: 443, + } + + if s.Cluster.Spec.ClusterNetwork.APIServerPort != 0 { + endpoint.Port = s.Cluster.Spec.ClusterNetwork.APIServerPort } return endpoint } // FailureDomains returns the cluster failure domains. -func (s *ClusterScope) FailureDomains() clusterv1beta1.FailureDomains { - return s.GCPCluster.Status.FailureDomains +func (s *ClusterScope) FailureDomains() []string { + failureDomains := []string{} + for failureDomainName, _ := range s.GCPCluster.Status.FailureDomains { + failureDomains = append(failureDomains, failureDomainName) + } + return failureDomains } // ANCHOR_END: ClusterGetter @@ -213,8 +221,11 @@ func (s *ClusterScope) SetFailureDomains(fd clusterv1beta1.FailureDomains) { } // SetControlPlaneEndpoint sets cluster control-plane endpoint. -func (s *ClusterScope) SetControlPlaneEndpoint(endpoint clusterv1beta1.APIEndpoint) { - s.GCPCluster.Spec.ControlPlaneEndpoint = endpoint +func (s *ClusterScope) SetControlPlaneEndpoint(endpoint clusterv1.APIEndpoint) { + s.GCPCluster.Spec.ControlPlaneEndpoint = clusterv1beta1.APIEndpoint{ + Host: endpoint.Host, + Port: endpoint.Port, + } } // ANCHOR_END: ClusterSetter @@ -354,8 +365,8 @@ func (s *ClusterScope) BackendServiceSpec(lbname string) *compute.BackendService // ForwardingRuleSpec returns google compute forwarding-rule spec. 
func (s *ClusterScope) ForwardingRuleSpec(lbname string) *compute.ForwardingRule { port := int32(443) - if c := s.Cluster.Spec.ClusterNetwork; c != nil { - port = ptr.Deref(c.APIServerPort, 443) + if s.Cluster.Spec.ClusterNetwork.APIServerPort != 0 { + port = s.Cluster.Spec.ClusterNetwork.APIServerPort } portRange := fmt.Sprintf("%d-%d", port, port) return &compute.ForwardingRule{ diff --git a/cloud/scope/machine.go b/cloud/scope/machine.go index c259b0698..9c29d0ae1 100644 --- a/cloud/scope/machine.go +++ b/cloud/scope/machine.go @@ -36,7 +36,7 @@ import ( "sigs.k8s.io/cluster-api-provider-gcp/cloud" "sigs.k8s.io/cluster-api-provider-gcp/cloud/providerid" "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/shared" - clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -45,7 +45,7 @@ import ( type MachineScopeParams struct { Client client.Client ClusterGetter cloud.ClusterGetter - Machine *clusterv1beta1.Machine + Machine *clusterv1.Machine GCPMachine *infrav1.GCPMachine } @@ -81,7 +81,7 @@ type MachineScope struct { client client.Client patchHelper *patch.Helper ClusterGetter cloud.ClusterGetter - Machine *clusterv1beta1.Machine + Machine *clusterv1.Machine GCPMachine *infrav1.GCPMachine } @@ -99,19 +99,15 @@ func (m *MachineScope) NetworkCloud() cloud.Cloud { // Zone returns the FailureDomain for the GCPMachine. func (m *MachineScope) Zone() string { - if m.Machine.Spec.FailureDomain == nil { + if m.Machine.Spec.FailureDomain == "" { fd := m.ClusterGetter.FailureDomains() if len(fd) == 0 { return "" } - zones := make([]string, 0, len(fd)) - for zone := range fd { - zones = append(zones, zone) - } - sort.Strings(zones) - return zones[0] + sort.Strings(fd) + return fd[0] } - return *m.Machine.Spec.FailureDomain + return m.Machine.Spec.FailureDomain } // Project return the project for the GCPMachine's cluster. @@ -150,8 +146,8 @@ func (m *MachineScope) Role() string { } // IsControlPlaneMachine checks machine is a control plane node. -func IsControlPlaneMachine(machine *clusterv1beta1.Machine) bool { - _, ok := machine.Labels[clusterv1beta1.MachineControlPlaneLabel] +func IsControlPlaneMachine(machine *clusterv1.Machine) bool { + _, ok := machine.Labels[clusterv1.MachineControlPlaneLabel] return ok } @@ -228,10 +224,7 @@ func (m *MachineScope) SetAddresses(addressList []corev1.NodeAddress) { // InstanceImageSpec returns compute instance image attched-disk spec. func (m *MachineScope) InstanceImageSpec() *compute.AttachedDisk { - version := "" - if m.Machine.Spec.Version != nil { - version = *m.Machine.Spec.Version - } + version := m.Machine.Spec.Version image := "capi-ubuntu-1804-k8s-" + strings.ReplaceAll(semver.MajorMinor(version), ".", "-") sourceImage := path.Join("projects", m.ClusterGetter.Project(), "global", "images", "family", image) if m.GCPMachine.Spec.Image != nil { diff --git a/cloud/scope/machine_test.go b/cloud/scope/machine_test.go index 909690f57..05bc3b175 100644 --- a/cloud/scope/machine_test.go +++ b/cloud/scope/machine_test.go @@ -6,7 +6,7 @@ import ( "github.com/stretchr/testify/assert" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" - clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) @@ -28,9 +28,9 @@ func TestMachineLocalSSDDiskType(t *testing.T) { // New test machine, needed as parameter. 
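// Sketch of the pointer-to-value migration applied in the hunks above and
// below, not part of the patch: with v1beta2 Machines, Spec.FailureDomain and
// Spec.Version are plain strings rather than *string, so the nil guards in
// MachineScope become empty-string checks, roughly:
//
//	if m.Machine.Spec.FailureDomain == "" { /* fall back to the cluster failure domains */ }
//	version := m.Machine.Spec.Version // no dereference needed
//
// and the test fixtures below assign the string value directly instead of
// taking its address.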
failureDomain := "example.com" - testMachine := clusterv1beta1.Machine{ - Spec: clusterv1beta1.MachineSpec{ - FailureDomain: &failureDomain, + testMachine := clusterv1.Machine{ + Spec: clusterv1.MachineSpec{ + FailureDomain: failureDomain, }, } @@ -87,9 +87,9 @@ func TestInstanceNetworkInterfaceAliasIPRangesSpec(t *testing.T) { // Test machine parameter failureDomain := "us-central1-a" - testMachine := clusterv1beta1.Machine{ - Spec: clusterv1beta1.MachineSpec{ - FailureDomain: &failureDomain, + testMachine := clusterv1.Machine{ + Spec: clusterv1.MachineSpec{ + FailureDomain: failureDomain, }, } diff --git a/cloud/scope/managedcluster.go b/cloud/scope/managedcluster.go index 79b620651..03d30bf18 100644 --- a/cloud/scope/managedcluster.go +++ b/cloud/scope/managedcluster.go @@ -28,6 +28,7 @@ import ( "sigs.k8s.io/cluster-api-provider-gcp/cloud" infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -36,7 +37,7 @@ import ( type ManagedClusterScopeParams struct { GCPServices Client client.Client - Cluster *clusterv1beta1.Cluster + Cluster *clusterv1.Cluster GCPManagedCluster *infrav1exp.GCPManagedCluster GCPManagedControlPlane *infrav1exp.GCPManagedControlPlane } @@ -80,7 +81,7 @@ type ManagedClusterScope struct { client client.Client patchHelper *patch.Helper - Cluster *clusterv1beta1.Cluster + Cluster *clusterv1.Cluster GCPManagedCluster *infrav1exp.GCPManagedCluster GCPManagedControlPlane *infrav1exp.GCPManagedControlPlane GCPServices @@ -172,15 +173,24 @@ func (s *ManagedClusterScope) ResourceManagerTags() infrav1.ResourceManagerTags } // ControlPlaneEndpoint returns the cluster control-plane endpoint. -func (s *ManagedClusterScope) ControlPlaneEndpoint() clusterv1beta1.APIEndpoint { - endpoint := s.GCPManagedCluster.Spec.ControlPlaneEndpoint - endpoint.Port = ptr.Deref(s.Cluster.Spec.ClusterNetwork.APIServerPort, 443) +func (s *ManagedClusterScope) ControlPlaneEndpoint() clusterv1.APIEndpoint { + endpoint := clusterv1.APIEndpoint{ + Host: s.GCPManagedCluster.Spec.ControlPlaneEndpoint.Host, + Port: 443, + } + if s.Cluster.Spec.ClusterNetwork.APIServerPort != 0 { + endpoint.Port = s.Cluster.Spec.ClusterNetwork.APIServerPort + } return endpoint } // FailureDomains returns the cluster failure domains. -func (s *ManagedClusterScope) FailureDomains() clusterv1beta1.FailureDomains { - return s.GCPManagedCluster.Status.FailureDomains +func (s *ManagedClusterScope) FailureDomains() []string { + failureDomains := []string{} + for failureDomainName, _ := range s.GCPManagedCluster.Status.FailureDomains { + failureDomains = append(failureDomains, failureDomainName) + } + return failureDomains } // ANCHOR_END: ClusterGetter @@ -198,8 +208,11 @@ func (s *ManagedClusterScope) SetFailureDomains(fd clusterv1beta1.FailureDomains } // SetControlPlaneEndpoint sets cluster control-plane endpoint. 
-func (s *ManagedClusterScope) SetControlPlaneEndpoint(endpoint clusterv1beta1.APIEndpoint) { - s.GCPManagedCluster.Spec.ControlPlaneEndpoint = endpoint +func (s *ManagedClusterScope) SetControlPlaneEndpoint(endpoint clusterv1.APIEndpoint) { + s.GCPManagedCluster.Spec.ControlPlaneEndpoint = clusterv1beta1.APIEndpoint{ + Host: endpoint.Host, + Port: endpoint.Port, + } } // ANCHOR_END: ClusterSetter diff --git a/cloud/scope/managedcontrolplane.go b/cloud/scope/managedcontrolplane.go index b11dd2432..5833694d6 100644 --- a/cloud/scope/managedcontrolplane.go +++ b/cloud/scope/managedcontrolplane.go @@ -30,6 +30,7 @@ import ( "github.com/pkg/errors" infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -45,7 +46,7 @@ type ManagedControlPlaneScopeParams struct { ManagedClusterClient *container.ClusterManagerClient TagBindingsClient *resourcemanager.TagBindingsClient Client client.Client - Cluster *clusterv1beta1.Cluster + Cluster *clusterv1.Cluster GCPManagedCluster *infrav1exp.GCPManagedCluster GCPManagedControlPlane *infrav1exp.GCPManagedControlPlane } @@ -114,7 +115,7 @@ type ManagedControlPlaneScope struct { client client.Client patchHelper *v1beta1patch.Helper - Cluster *clusterv1beta1.Cluster + Cluster *clusterv1.Cluster GCPManagedCluster *infrav1exp.GCPManagedCluster GCPManagedControlPlane *infrav1exp.GCPManagedControlPlane mcClient *container.ClusterManagerClient @@ -122,7 +123,7 @@ type ManagedControlPlaneScope struct { credentialsClient *credentials.IamCredentialsClient credential *Credential - AllMachinePools []clusterv1beta1.MachinePool + AllMachinePools []clusterv1.MachinePool AllManagedMachinePools []infrav1exp.GCPManagedMachinePool } @@ -178,14 +179,14 @@ func (s *ManagedControlPlaneScope) GetCredential() *Credential { } // GetAllNodePools gets all node pools for the control plane. 
-func (s *ManagedControlPlaneScope) GetAllNodePools(ctx context.Context) ([]infrav1exp.GCPManagedMachinePool, []clusterv1beta1.MachinePool, error) { +func (s *ManagedControlPlaneScope) GetAllNodePools(ctx context.Context) ([]infrav1exp.GCPManagedMachinePool, []clusterv1.MachinePool, error) { if len(s.AllManagedMachinePools) == 0 { listOptions := []client.ListOption{ client.InNamespace(s.GCPManagedControlPlane.Namespace), - client.MatchingLabels(map[string]string{clusterv1beta1.ClusterNameLabel: s.Cluster.Name}), + client.MatchingLabels(map[string]string{clusterv1.ClusterNameLabel: s.Cluster.Name}), } - machinePoolList := &clusterv1beta1.MachinePoolList{} + machinePoolList := &clusterv1.MachinePoolList{} if err := s.client.List(ctx, machinePoolList, listOptions...); err != nil { return nil, nil, err } diff --git a/cloud/scope/managedmachinepool.go b/cloud/scope/managedmachinepool.go index 14ac88d75..c08f20c51 100644 --- a/cloud/scope/managedmachinepool.go +++ b/cloud/scope/managedmachinepool.go @@ -34,6 +34,7 @@ import ( "github.com/pkg/errors" infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -43,8 +44,8 @@ type ManagedMachinePoolScopeParams struct { ManagedClusterClient *container.ClusterManagerClient InstanceGroupManagersClient *compute.InstanceGroupManagersClient Client client.Client - Cluster *clusterv1beta1.Cluster - MachinePool *clusterv1beta1.MachinePool + Cluster *clusterv1.Cluster + MachinePool *clusterv1.MachinePool GCPManagedCluster *infrav1exp.GCPManagedCluster GCPManagedControlPlane *infrav1exp.GCPManagedControlPlane GCPManagedMachinePool *infrav1exp.GCPManagedMachinePool @@ -106,8 +107,8 @@ type ManagedMachinePoolScope struct { client client.Client patchHelper *v1beta1patch.Helper - Cluster *clusterv1beta1.Cluster - MachinePool *clusterv1beta1.MachinePool + Cluster *clusterv1.Cluster + MachinePool *clusterv1.MachinePool GCPManagedCluster *infrav1exp.GCPManagedCluster GCPManagedControlPlane *infrav1exp.GCPManagedControlPlane GCPManagedMachinePool *infrav1exp.GCPManagedMachinePool @@ -151,7 +152,7 @@ func (s *ManagedMachinePoolScope) InstanceGroupManagersClient() *compute.Instanc } // NodePoolVersion returns the k8s version of the node pool. -func (s *ManagedMachinePoolScope) NodePoolVersion() *string { +func (s *ManagedMachinePoolScope) NodePoolVersion() string { return s.MachinePool.Spec.Template.Spec.Version } @@ -166,7 +167,7 @@ func NodePoolResourceLabels(additionalLabels infrav1.Labels, clusterName string) } // ConvertToSdkNodePool converts a node pool to format that is used by GCP SDK. 
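// Sketch, not part of the patch: the node-pool version now arrives as a plain
// v1beta2 string, so the SDK conversion below swaps the nil guard for an
// empty-string guard before stripping the leading "v", roughly:
//
//	if v := machinePool.Spec.Template.Spec.Version; v != "" {
//		sdkNodePool.Version = strings.Replace(v, "v", "", 1)
//	}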
-func ConvertToSdkNodePool(nodePool infrav1exp.GCPManagedMachinePool, machinePool clusterv1beta1.MachinePool, regional bool, clusterName string) *containerpb.NodePool { +func ConvertToSdkNodePool(nodePool infrav1exp.GCPManagedMachinePool, machinePool clusterv1.MachinePool, regional bool, clusterName string) *containerpb.NodePool { replicas := *machinePool.Spec.Replicas if regional { if len(nodePool.Spec.NodeLocations) != 0 { @@ -268,14 +269,14 @@ func ConvertToSdkNodePool(nodePool infrav1exp.GCPManagedMachinePool, machinePool Type: containerpb.SandboxConfig_GVISOR, } } - if machinePool.Spec.Template.Spec.Version != nil { - sdkNodePool.Version = strings.Replace(*machinePool.Spec.Template.Spec.Version, "v", "", 1) + if machinePool.Spec.Template.Spec.Version != "" { + sdkNodePool.Version = strings.Replace(machinePool.Spec.Template.Spec.Version, "v", "", 1) } return &sdkNodePool } // ConvertToSdkNodePools converts node pools to format that is used by GCP SDK. -func ConvertToSdkNodePools(nodePools []infrav1exp.GCPManagedMachinePool, machinePools []clusterv1beta1.MachinePool, regional bool, clusterName string) []*containerpb.NodePool { +func ConvertToSdkNodePools(nodePools []infrav1exp.GCPManagedMachinePool, machinePools []clusterv1.MachinePool, regional bool, clusterName string) []*containerpb.NodePool { res := []*containerpb.NodePool{} for i := range nodePools { res = append(res, ConvertToSdkNodePool(nodePools[i], machinePools[i], regional, clusterName)) diff --git a/cloud/scope/managedmachinepool_test.go b/cloud/scope/managedmachinepool_test.go index 3eff7504f..765007a64 100644 --- a/cloud/scope/managedmachinepool_test.go +++ b/cloud/scope/managedmachinepool_test.go @@ -8,12 +8,12 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/cloud" "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" - clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) var ( TestGCPMMP *v1beta1.GCPManagedMachinePool - TestMP *clusterv1beta1.MachinePool + TestMP *clusterv1.MachinePool TestClusterName string ) @@ -36,8 +36,8 @@ var _ = Describe("GCPManagedMachinePool Scope", func() { }, }, } - TestMP = &clusterv1beta1.MachinePool{ - Spec: clusterv1beta1.MachinePoolSpec{ + TestMP = &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: &replicas, }, } diff --git a/cloud/services/compute/firewalls/reconcile_test.go b/cloud/services/compute/firewalls/reconcile_test.go index 2e55747a0..ec19cc3c9 100644 --- a/cloud/services/compute/firewalls/reconcile_test.go +++ b/cloud/services/compute/firewalls/reconcile_test.go @@ -32,21 +32,21 @@ import ( "k8s.io/utils/ptr" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" - clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func init() { - _ = clusterv1beta1.AddToScheme(scheme.Scheme) + _ = clusterv1.AddToScheme(scheme.Scheme) _ = infrav1.AddToScheme(scheme.Scheme) } -var fakeCluster = &clusterv1beta1.Cluster{ +var fakeCluster = &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", Namespace: "default", }, - Spec: clusterv1beta1.ClusterSpec{}, + Spec: clusterv1.ClusterSpec{}, } var fakeGCPCluster = &infrav1.GCPCluster{ diff --git a/cloud/services/compute/instances/reconcile_test.go b/cloud/services/compute/instances/reconcile_test.go index d27d0d220..859a3701c 
100644 --- a/cloud/services/compute/instances/reconcile_test.go +++ b/cloud/services/compute/instances/reconcile_test.go @@ -36,11 +36,12 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func init() { - _ = clusterv1beta1.AddToScheme(scheme.Scheme) + _ = clusterv1.AddToScheme(scheme.Scheme) _ = infrav1.AddToScheme(scheme.Scheme) } @@ -54,38 +55,38 @@ var fakeBootstrapSecret = &corev1.Secret{ }, } -var fakeCluster = &clusterv1beta1.Cluster{ +var fakeCluster = &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", Namespace: "default", }, - Spec: clusterv1beta1.ClusterSpec{}, + Spec: clusterv1.ClusterSpec{}, } -var fakeMachine = &clusterv1beta1.Machine{ +var fakeMachine = &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "my-machine", Namespace: "default", }, - Spec: clusterv1beta1.MachineSpec{ - Bootstrap: clusterv1beta1.Bootstrap{ + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To[string]("my-cluster-bootstrap"), }, - FailureDomain: ptr.To[string]("us-central1-c"), - Version: ptr.To[string]("v1.19.11"), + FailureDomain: "us-central1-c", + Version: "v1.19.11", }, } -var fakeMachineWithOutFailureDomain = &clusterv1beta1.Machine{ +var fakeMachineWithOutFailureDomain = &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "my-machine", Namespace: "default", }, - Spec: clusterv1beta1.MachineSpec{ - Bootstrap: clusterv1beta1.Bootstrap{ + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To[string]("my-cluster-bootstrap"), }, - Version: ptr.To[string]("v1.19.11"), + Version: "v1.19.11", }, } diff --git a/cloud/services/compute/loadbalancers/reconcile.go b/cloud/services/compute/loadbalancers/reconcile.go index 39fa72b1a..5ad8ad53e 100644 --- a/cloud/services/compute/loadbalancers/reconcile.go +++ b/cloud/services/compute/loadbalancers/reconcile.go @@ -254,11 +254,7 @@ func (s *Service) createInternalLoadBalancer(ctx context.Context, name string, l func (s *Service) createOrGetInstanceGroups(ctx context.Context) ([]*compute.InstanceGroup, error) { log := log.FromContext(ctx) - fd := s.scope.FailureDomains() - zones := make([]string, 0, len(fd)) - for zone := range fd { - zones = append(zones, zone) - } + zones := s.scope.FailureDomains() groups := make([]*compute.InstanceGroup, 0, len(zones)) groupsMap := s.scope.Network().APIServerInstanceGroups diff --git a/cloud/services/compute/loadbalancers/reconcile_test.go b/cloud/services/compute/loadbalancers/reconcile_test.go index 909a18a50..d7f25c838 100644 --- a/cloud/services/compute/loadbalancers/reconcile_test.go +++ b/cloud/services/compute/loadbalancers/reconcile_test.go @@ -32,13 +32,14 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) var lbTypeInternal = infrav1.Internal func init() { - _ = clusterv1beta1.AddToScheme(scheme.Scheme) + _ = clusterv1.AddToScheme(scheme.Scheme) _ = infrav1.AddToScheme(scheme.Scheme) } @@ -47,12 +48,12 @@ func getBaseClusterScope() (*scope.ClusterScope, error) { WithScheme(scheme.Scheme). 
Build() - fakeCluster := &clusterv1beta1.Cluster{ + fakeCluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", Namespace: "default", }, - Spec: clusterv1beta1.ClusterSpec{}, + Spec: clusterv1.ClusterSpec{}, } fakeGCPCluster := &infrav1.GCPCluster{ @@ -123,9 +124,8 @@ func getBaseClusterScopeWithPortSet() (*scope.ClusterScope, error) { return nil, err } - port := int32(6443) - clusterScope.Cluster.Spec.ClusterNetwork = &clusterv1beta1.ClusterNetwork{ - APIServerPort: &port, + clusterScope.Cluster.Spec.ClusterNetwork = clusterv1.ClusterNetwork{ + APIServerPort: 6443, } return clusterScope, nil } diff --git a/cloud/services/compute/networks/reconcile_test.go b/cloud/services/compute/networks/reconcile_test.go index 827252137..11b8187ef 100644 --- a/cloud/services/compute/networks/reconcile_test.go +++ b/cloud/services/compute/networks/reconcile_test.go @@ -31,21 +31,21 @@ import ( "k8s.io/utils/ptr" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" - clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func init() { - _ = clusterv1beta1.AddToScheme(scheme.Scheme) + _ = clusterv1.AddToScheme(scheme.Scheme) _ = infrav1.AddToScheme(scheme.Scheme) } -var fakeCluster = &clusterv1beta1.Cluster{ +var fakeCluster = &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", Namespace: "default", }, - Spec: clusterv1beta1.ClusterSpec{}, + Spec: clusterv1.ClusterSpec{}, } var fakeGCPCluster = &infrav1.GCPCluster{ diff --git a/cloud/services/compute/subnets/reconcile_test.go b/cloud/services/compute/subnets/reconcile_test.go index ec7abb00e..7b724093d 100644 --- a/cloud/services/compute/subnets/reconcile_test.go +++ b/cloud/services/compute/subnets/reconcile_test.go @@ -33,21 +33,21 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" - clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func init() { - _ = clusterv1beta1.AddToScheme(scheme.Scheme) + _ = clusterv1.AddToScheme(scheme.Scheme) _ = infrav1.AddToScheme(scheme.Scheme) } -var fakeCluster = &clusterv1beta1.Cluster{ +var fakeCluster = &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", Namespace: "default", }, - Spec: clusterv1beta1.ClusterSpec{}, + Spec: clusterv1.ClusterSpec{}, } var fakeGCPCluster = &infrav1.GCPCluster{ diff --git a/cloud/services/container/nodepools/reconcile.go b/cloud/services/container/nodepools/reconcile.go index 52f42d31a..31f8d485d 100644 --- a/cloud/services/container/nodepools/reconcile.go +++ b/cloud/services/container/nodepools/reconcile.go @@ -355,8 +355,8 @@ func (s *Service) checkDiffAndPrepareUpdateConfig(existingNodePool *containerpb. 
desiredNodePool := scope.ConvertToSdkNodePool(*s.scope.GCPManagedMachinePool, *s.scope.MachinePool, isRegional, s.scope.GCPManagedControlPlane.Spec.ClusterName) // Node version - if s.scope.NodePoolVersion() != nil { - desiredNodePoolVersion := infrav1exp.ConvertFromSdkNodeVersion(*s.scope.NodePoolVersion()) + if s.scope.NodePoolVersion() != "" { + desiredNodePoolVersion := infrav1exp.ConvertFromSdkNodeVersion(s.scope.NodePoolVersion()) if desiredNodePoolVersion != infrav1exp.ConvertFromSdkNodeVersion(existingNodePool.GetVersion()) { needUpdate = true updateNodePoolRequest.NodeVersion = desiredNodePoolVersion diff --git a/cloud/services/shared/machinepool.go b/cloud/services/shared/machinepool.go index 87afa8aec..9f4cac82b 100644 --- a/cloud/services/shared/machinepool.go +++ b/cloud/services/shared/machinepool.go @@ -21,14 +21,14 @@ import ( "fmt" "strings" - clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api-provider-gcp/cloud" infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" ) // ManagedMachinePoolPreflightCheck will perform checks against the machine pool before its created. -func ManagedMachinePoolPreflightCheck(managedPool *infrav1exp.GCPManagedMachinePool, machinePool *clusterv1beta1.MachinePool, location string) error { +func ManagedMachinePoolPreflightCheck(managedPool *infrav1exp.GCPManagedMachinePool, machinePool *clusterv1.MachinePool, location string) error { if machinePool.Spec.Template.Spec.InfrastructureRef.Name != managedPool.Name { return fmt.Errorf("expect machinepool infraref (%s) to match managed machine pool name (%s)", machinePool.Spec.Template.Spec.InfrastructureRef.Name, managedPool.Name) } @@ -49,7 +49,7 @@ func ManagedMachinePoolPreflightCheck(managedPool *infrav1exp.GCPManagedMachineP } // ManagedMachinePoolsPreflightCheck will perform checks against a slice of machine pool before they are created. 
-func ManagedMachinePoolsPreflightCheck(managedPools []infrav1exp.GCPManagedMachinePool, machinePools []clusterv1beta1.MachinePool, location string) error { +func ManagedMachinePoolsPreflightCheck(managedPools []infrav1exp.GCPManagedMachinePool, machinePools []clusterv1.MachinePool, location string) error { if len(machinePools) != len(managedPools) { return errors.New("each machinepool must have a matching gcpmanagedmachinepool") } diff --git a/controllers/gcpcluster_controller.go b/controllers/gcpcluster_controller.go index 6b8674c97..083edbf2f 100644 --- a/controllers/gcpcluster_controller.go +++ b/controllers/gcpcluster_controller.go @@ -34,6 +34,7 @@ import ( "sigs.k8s.io/cluster-api-provider-gcp/pkg/capiutils" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/predicates" @@ -75,7 +76,7 @@ func (r *GCPClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma clusterToInfraFn := util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind("GCPCluster"), mgr.GetClient(), &infrav1.GCPCluster{}) if err = c.Watch( - source.Kind[client.Object](mgr.GetCache(), &clusterv1beta1.Cluster{}, + source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(func(mapCtx context.Context, o client.Object) []reconcile.Request { requests := clusterToInfraFn(mapCtx, o) if requests == nil { @@ -120,7 +121,7 @@ func (r *GCPClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) } // Fetch the Cluster. - cluster, err := capiutils.GetOwnerCluster(ctx, r.Client, gcpCluster.ObjectMeta) + cluster, err := util.GetOwnerCluster(ctx, r.Client, gcpCluster.ObjectMeta) if err != nil { log.Error(err, "Failed to get owner cluster") return ctrl.Result{}, err diff --git a/controllers/gcpmachine_controller.go b/controllers/gcpmachine_controller.go index f56d8b72b..c318fedd6 100644 --- a/controllers/gcpmachine_controller.go +++ b/controllers/gcpmachine_controller.go @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute/instances" "sigs.k8s.io/cluster-api-provider-gcp/pkg/capiutils" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" - clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/predicates" "sigs.k8s.io/cluster-api/util/record" @@ -61,7 +61,7 @@ func (r *GCPMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma For(&infrav1.GCPMachine{}). WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), ctrl.LoggerFrom(ctx), r.WatchFilterValue)). Watches( - &clusterv1beta1.Machine{}, + &clusterv1.Machine{}, handler.EnqueueRequestsFromMapFunc(util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("GCPMachine"))), ). Watches( @@ -80,7 +80,7 @@ func (r *GCPMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma // Add a watch on clusterv1.Cluster object for unpause & ready notifications. 
if err := c.Watch( - source.Kind[client.Object](mgr.GetCache(), &clusterv1beta1.Cluster{}, + source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(clusterToObjectFunc), capiutils.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log), )); err != nil { @@ -112,8 +112,8 @@ func (r *GCPMachineReconciler) GCPClusterToGCPMachines(ctx context.Context) hand return result } - labels := map[string]string{clusterv1beta1.ClusterNameLabel: cluster.Name} - machineList := &clusterv1beta1.MachineList{} + labels := map[string]string{clusterv1.ClusterNameLabel: cluster.Name} + machineList := &clusterv1.MachineList{} if err := r.List(mapCtx, machineList, client.InNamespace(c.Namespace), client.MatchingLabels(labels)); err != nil { log.Error(err, "failed to list Machines") return nil diff --git a/controllers/gcpmachine_controller_unit_test.go b/controllers/gcpmachine_controller_unit_test.go index f5ca27f88..840ea12ac 100644 --- a/controllers/gcpmachine_controller_unit_test.go +++ b/controllers/gcpmachine_controller_unit_test.go @@ -21,20 +21,19 @@ import ( "testing" . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" - clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) -func newMachine(clusterName, machineName string) *clusterv1beta1.Machine { - return &clusterv1beta1.Machine{ +func newMachine(clusterName, machineName string) *clusterv1.Machine { + return &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - clusterv1beta1.ClusterNameLabel: clusterName, + clusterv1.ClusterNameLabel: clusterName, }, Name: machineName, Namespace: "default", @@ -42,20 +41,19 @@ func newMachine(clusterName, machineName string) *clusterv1beta1.Machine { } } -func newMachineWithInfrastructureRef(clusterName, machineName string) *clusterv1beta1.Machine { +func newMachineWithInfrastructureRef(clusterName, machineName string) *clusterv1.Machine { m := newMachine(clusterName, machineName) - m.Spec.InfrastructureRef = corev1.ObjectReference{ - Kind: "GCPMachine", - Namespace: "", - Name: "gcp" + machineName, - APIVersion: infrav1.GroupVersion.String(), + m.Spec.InfrastructureRef = clusterv1.ContractVersionedObjectReference{ + Kind: "GCPMachine", + Name: "gcp" + machineName, + APIGroup: infrav1.GroupVersion.Group, } return m } -func newCluster(name string) *clusterv1beta1.Cluster { - return &clusterv1beta1.Cluster{ +func newCluster(name string) *clusterv1.Cluster { + return &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: "default", @@ -70,7 +68,7 @@ func TestGCPMachineReconciler_GCPClusterToGCPMachines(t *testing.T) { scheme := runtime.NewScheme() g.Expect(infrav1.AddToScheme(scheme)).To(Succeed()) - g.Expect(clusterv1beta1.AddToScheme(scheme)).To(Succeed()) + g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) clusterName := "my-cluster" initObjects := []runtime.Object{ @@ -96,7 +94,7 @@ func TestGCPMachineReconciler_GCPClusterToGCPMachines(t *testing.T) { { Name: clusterName, Kind: "Cluster", - APIVersion: clusterv1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), }, }, }, diff --git a/controllers/suite_test.go b/controllers/suite_test.go index da5d83e0e..7527e75d9 100644 --- a/controllers/suite_test.go +++ 
b/controllers/suite_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/klog/v2" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" - clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -69,7 +69,7 @@ var _ = BeforeSuite(func() { Expect(err).NotTo(HaveOccurred()) Expect(cfg).ToNot(BeNil()) - Expect(clusterv1beta1.AddToScheme(scheme.Scheme)).To(Succeed()) + Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) Expect(infrav1.AddToScheme(scheme.Scheme)).To(Succeed()) // +kubebuilder:scaffold:scheme diff --git a/exp/bootstrap/gke/controllers/gkeconfig_controller.go b/exp/bootstrap/gke/controllers/gkeconfig_controller.go index fd22206e8..1bb1f8eb5 100644 --- a/exp/bootstrap/gke/controllers/gkeconfig_controller.go +++ b/exp/bootstrap/gke/controllers/gkeconfig_controller.go @@ -34,7 +34,7 @@ import ( infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" bootstrapv1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/bootstrap/gke/api/v1beta1" - clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -97,7 +97,7 @@ func (r *GKEConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( gcpMP := &infrav1exp.GCPManagedMachinePool{} gcpMPKey := types.NamespacedName{ Name: machinePool.Spec.Template.Spec.InfrastructureRef.Name, - Namespace: machinePool.Spec.Template.Spec.InfrastructureRef.Namespace, + Namespace: machinePool.Namespace, } if err := r.Get(ctx, gcpMPKey, gcpMP); err != nil { if apierrors.IsNotFound(err) { @@ -148,13 +148,13 @@ func (r *GKEConfigReconciler) ManagedMachinePoolToGKEConfigMapFunc(_ context.Con { NamespacedName: client.ObjectKey{ Name: machinePool.Spec.Template.Spec.InfrastructureRef.Name, - Namespace: machinePool.Spec.Template.Spec.InfrastructureRef.Namespace, + Namespace: machinePool.Namespace, }, }, } } -func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1beta1.MachinePool, error) { +func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.MachinePool, error) { for _, ref := range obj.OwnerReferences { if ref.Kind != "MachinePool" { continue @@ -163,7 +163,7 @@ func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.Object if err != nil { return nil, errors.WithStack(err) } - if gv.Group == clusterv1beta1.GroupVersion.Group { + if gv.Group == clusterv1.GroupVersion.Group { return getMachinePoolByName(ctx, c, obj.Namespace, ref.Name) } } @@ -171,8 +171,8 @@ func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.Object return nil, nil } -func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1beta1.MachinePool, error) { - m := &clusterv1beta1.MachinePool{} +func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.MachinePool, error) { + m := &clusterv1.MachinePool{} key := client.ObjectKey{Name: name, Namespace: namespace} if err := c.Get(ctx, key, m); err != nil { return nil, err diff --git a/exp/controllers/gcpmanagedcluster_controller.go b/exp/controllers/gcpmanagedcluster_controller.go index a545febda..45ee3ef47 100644 --- a/exp/controllers/gcpmanagedcluster_controller.go +++ b/exp/controllers/gcpmanagedcluster_controller.go @@ 
-35,6 +35,7 @@ import ( "sigs.k8s.io/cluster-api-provider-gcp/pkg/capiutils" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/predicates" "sigs.k8s.io/cluster-api/util/record" @@ -101,7 +102,7 @@ func (r *GCPManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Re controlPlane := &infrav1exp.GCPManagedControlPlane{} controlPlaneRef := types.NamespacedName{ Name: cluster.Spec.ControlPlaneRef.Name, - Namespace: cluster.Spec.ControlPlaneRef.Namespace, + Namespace: cluster.Namespace, } log.V(4).Info("getting control plane ", "ref", controlPlaneRef) @@ -156,7 +157,7 @@ func (r *GCPManagedClusterReconciler) SetupWithManager(ctx context.Context, mgr } if err = c.Watch( - source.Kind[client.Object](mgr.GetCache(), &clusterv1beta1.Cluster{}, + source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1exp.GroupVersion.WithKind("GCPManagedCluster"), mgr.GetClient(), &infrav1exp.GCPManagedCluster{})), predicates.ClusterUnpaused(mgr.GetScheme(), log), predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), log, r.WatchFilterValue), @@ -212,7 +213,10 @@ func (r *GCPManagedClusterReconciler) reconcile(ctx context.Context, clusterScop record.Event(clusterScope.GCPManagedCluster, "GCPManagedClusterReconcile", "Ready") controlPlaneEndpoint := clusterScope.GCPManagedControlPlane.Spec.Endpoint - clusterScope.SetControlPlaneEndpoint(controlPlaneEndpoint) + clusterScope.SetControlPlaneEndpoint(clusterv1.APIEndpoint{ + Host: controlPlaneEndpoint.Host, + Port: controlPlaneEndpoint.Port, + }) if controlPlaneEndpoint.IsZero() { log.Info("GCPManagedControlplane does not have endpoint yet. 
Reconciling") @@ -291,7 +295,7 @@ func (r *GCPManagedClusterReconciler) managedControlPlaneMapper() handler.MapFun } managedClusterRef := cluster.Spec.InfrastructureRef - if managedClusterRef == nil || managedClusterRef.Kind != "GCPManagedCluster" { + if !managedClusterRef.IsDefined() || managedClusterRef.Kind != "GCPManagedCluster" { log.Info("InfrastructureRef is nil or not GCPManagedCluster, skipping mapping") return nil } @@ -300,7 +304,7 @@ func (r *GCPManagedClusterReconciler) managedControlPlaneMapper() handler.MapFun { NamespacedName: types.NamespacedName{ Name: managedClusterRef.Name, - Namespace: managedClusterRef.Namespace, + Namespace: cluster.Namespace, }, }, } @@ -315,7 +319,7 @@ func (r *GCPManagedClusterReconciler) dependencyCount(ctx context.Context, clust listOptions := []client.ListOption{ client.InNamespace(clusterNamespace), - client.MatchingLabels(map[string]string{clusterv1beta1.ClusterNameLabel: clusterName}), + client.MatchingLabels(map[string]string{clusterv1.ClusterNameLabel: clusterName}), } managedMachinePools := &infrav1exp.GCPManagedMachinePoolList{} diff --git a/exp/controllers/gcpmanagedcontrolplane_controller.go b/exp/controllers/gcpmanagedcontrolplane_controller.go index b6f577863..28e30fed6 100644 --- a/exp/controllers/gcpmanagedcontrolplane_controller.go +++ b/exp/controllers/gcpmanagedcontrolplane_controller.go @@ -29,7 +29,7 @@ import ( infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/pkg/capiutils" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" - clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/predicates" @@ -73,7 +73,7 @@ func (r *GCPManagedControlPlaneReconciler) SetupWithManager(ctx context.Context, } if err = c.Watch( - source.Kind[client.Object](mgr.GetCache(), &clusterv1beta1.Cluster{}, + source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, gcpManagedControlPlane.GroupVersionKind(), mgr.GetClient(), &infrav1exp.GCPManagedControlPlane{})), capiutils.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log), )); err != nil { diff --git a/exp/controllers/gcpmanagedmachinepool_controller.go b/exp/controllers/gcpmanagedmachinepool_controller.go index e29e5d3cd..9dce03983 100644 --- a/exp/controllers/gcpmanagedmachinepool_controller.go +++ b/exp/controllers/gcpmanagedmachinepool_controller.go @@ -41,6 +41,7 @@ import ( infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/predicates" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -69,7 +70,7 @@ func GetOwnerClusterKey(obj metav1.ObjectMeta) (*client.ObjectKey, error) { if err != nil { return nil, errors.WithStack(err) } - if gv.Group == clusterv1beta1.GroupVersion.Group { + if gv.Group == clusterv1.GroupVersion.Group { return &client.ObjectKey{ Namespace: obj.Namespace, Name: ref.Name, @@ -81,14 +82,14 @@ func GetOwnerClusterKey(obj metav1.ObjectMeta) (*client.ObjectKey, error) { func machinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind) handler.MapFunc { return func(ctx context.Context, o 
client.Object) []reconcile.Request { - m, ok := o.(*clusterv1beta1.MachinePool) + m, ok := o.(*clusterv1.MachinePool) if !ok { panic(fmt.Sprintf("Expected a MachinePool but got a %T", o)) } gk := gvk.GroupKind() // Return early if the GroupKind doesn't match what we expect - infraGK := m.Spec.Template.Spec.InfrastructureRef.GroupVersionKind().GroupKind() + infraGK := m.Spec.Template.Spec.InfrastructureRef.GroupKind() if gk != infraGK { log.FromContext(ctx).Info("gk does not match", "gk", gk, "infraGK", infraGK) return nil @@ -125,9 +126,9 @@ func managedControlPlaneToManagedMachinePoolMapFunc(c client.Client, gvk schema. return nil } - managedPoolForClusterList := clusterv1beta1.MachinePoolList{} + managedPoolForClusterList := clusterv1.MachinePoolList{} if err := c.List( - ctx, &managedPoolForClusterList, client.InNamespace(clusterKey.Namespace), client.MatchingLabels{clusterv1beta1.ClusterNameLabel: clusterKey.Name}, + ctx, &managedPoolForClusterList, client.InNamespace(clusterKey.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterKey.Name}, ); err != nil { log.Error(err, "couldn't list pools for cluster") return nil @@ -159,7 +160,7 @@ func (r *GCPManagedMachinePoolReconciler) SetupWithManager(ctx context.Context, For(&infrav1exp.GCPManagedMachinePool{}). WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), log, r.WatchFilterValue)). Watches( - &clusterv1beta1.MachinePool{}, + &clusterv1.MachinePool{}, handler.EnqueueRequestsFromMapFunc(machinePoolToInfrastructureMapFunc(gvk)), ). Watches( @@ -178,7 +179,7 @@ func (r *GCPManagedMachinePoolReconciler) SetupWithManager(ctx context.Context, // Add a watch on clusterv1.Cluster object for unpause & ready notifications. if err := c.Watch( - source.Kind[client.Object](mgr.GetCache(), &clusterv1beta1.Cluster{}, + source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(clusterToObjectFunc), capiutils.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log), )); err != nil { @@ -189,8 +190,8 @@ func (r *GCPManagedMachinePoolReconciler) SetupWithManager(ctx context.Context, } // getMachinePoolByName finds and return a Machine object using the specified params. -func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1beta1.MachinePool, error) { - m := &clusterv1beta1.MachinePool{} +func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.MachinePool, error) { + m := &clusterv1.MachinePool{} key := client.ObjectKey{Name: name, Namespace: namespace} if err := c.Get(ctx, key, m); err != nil { return nil, err @@ -199,7 +200,7 @@ func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name } // getOwnerMachinePool returns the MachinePool object owning the current resource. 
-func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1beta1.MachinePool, error) { +func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.MachinePool, error) { for _, ref := range obj.OwnerReferences { if ref.Kind != "MachinePool" { continue @@ -208,7 +209,7 @@ func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.Object if err != nil { return nil, errors.WithStack(err) } - if gv.Group == clusterv1beta1.GroupVersion.Group { + if gv.Group == clusterv1.GroupVersion.Group { return getMachinePoolByName(ctx, c, obj.Namespace, ref.Name) } } diff --git a/main.go b/main.go index 416d87ee5..dc85a557f 100644 --- a/main.go +++ b/main.go @@ -41,7 +41,7 @@ import ( "sigs.k8s.io/cluster-api-provider-gcp/feature" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" "sigs.k8s.io/cluster-api-provider-gcp/version" - clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" capifeature "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util/flags" "sigs.k8s.io/cluster-api/util/record" @@ -63,7 +63,7 @@ func init() { _ = clientgoscheme.AddToScheme(scheme) _ = infrav1beta1.AddToScheme(scheme) - _ = clusterv1beta1.AddToScheme(scheme) + _ = clusterv1.AddToScheme(scheme) _ = infrav1exp.AddToScheme(scheme) _ = gkebootstrapv1exp.AddToScheme(scheme) // +kubebuilder:scaffold:scheme @@ -352,7 +352,7 @@ func initFlags(fs *pflag.FlagSet) { &watchFilterValue, "watch-filter", "", - fmt.Sprintf("Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. If unspecified, the controller watches for all cluster-api objects.", clusterv1beta1.WatchLabel), + fmt.Sprintf("Label value that the controller watches to reconcile cluster-api objects. Label key is always %s. 
If unspecified, the controller watches for all cluster-api objects.", clusterv1.WatchLabel), ) fs.IntVar(&gcpClusterConcurrency, diff --git a/pkg/capiutils/predicates.go b/pkg/capiutils/predicates.go index 0b242569e..73436a89f 100644 --- a/pkg/capiutils/predicates.go +++ b/pkg/capiutils/predicates.go @@ -27,13 +27,14 @@ import ( "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/cluster-api/util/predicates" - clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // ClusterUpdateInfraReady returns a predicate that returns true for an update event when a cluster has Status.InfrastructureReady changed from false to true @@ -46,15 +47,15 @@ func ClusterUpdateInfraReady(scheme *runtime.Scheme, logger logr.Logger) predica log = log.WithValues(gvk.Kind, klog.KObj(e.ObjectOld)) } - oldCluster, ok := e.ObjectOld.(*clusterv1beta1.Cluster) + oldCluster, ok := e.ObjectOld.(*clusterv1.Cluster) if !ok { log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) return false } - newCluster := e.ObjectNew.(*clusterv1beta1.Cluster) + newCluster := e.ObjectNew.(*clusterv1.Cluster) - if !oldCluster.Status.InfrastructureReady && newCluster.Status.InfrastructureReady { + if !ptr.Deref(oldCluster.Status.Initialization.InfrastructureProvisioned, false) && ptr.Deref(newCluster.Status.Initialization.InfrastructureProvisioned, false) { log.V(6).Info("Cluster infrastructure became ready, allowing further processing") return true } @@ -69,6 +70,7 @@ func ClusterUpdateInfraReady(scheme *runtime.Scheme, logger logr.Logger) predica } // ClusterPausedTransitions returns a predicate that returns true for an update event when a cluster has Spec.Paused changed. 
+// FIXME(chrischdi): use new predicates from CAPI func ClusterPausedTransitions(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { return predicate.Funcs{ UpdateFunc: func(e event.UpdateEvent) bool { @@ -77,20 +79,20 @@ func ClusterPausedTransitions(scheme *runtime.Scheme, logger logr.Logger) predic log = log.WithValues(gvk.Kind, klog.KObj(e.ObjectOld)) } - oldCluster, ok := e.ObjectOld.(*clusterv1beta1.Cluster) + oldCluster, ok := e.ObjectOld.(*clusterv1.Cluster) if !ok { log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) return false } - newCluster := e.ObjectNew.(*clusterv1beta1.Cluster) + newCluster := e.ObjectNew.(*clusterv1.Cluster) - if oldCluster.Spec.Paused && !newCluster.Spec.Paused { + if ptr.Deref(oldCluster.Spec.Paused, false) && !ptr.Deref(newCluster.Spec.Paused, false) { log.V(6).Info("Cluster unpausing, allowing further processing") return true } - if !oldCluster.Spec.Paused && newCluster.Spec.Paused { + if !ptr.Deref(oldCluster.Spec.Paused, false) && ptr.Deref(newCluster.Spec.Paused, false) { log.V(6).Info("Cluster pausing, allowing further processing") return true } diff --git a/pkg/capiutils/utils.go b/pkg/capiutils/utils.go index 1617ececf..f27d31020 100644 --- a/pkg/capiutils/utils.go +++ b/pkg/capiutils/utils.go @@ -21,22 +21,23 @@ import ( "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" ) // IsControlPlaneMachine checks machine is a control plane node. -func IsControlPlaneMachine(machine *clusterv1beta1.Machine) bool { - _, ok := machine.Labels[clusterv1beta1.MachineControlPlaneLabel] +func IsControlPlaneMachine(machine *clusterv1.Machine) bool { + _, ok := machine.Labels[clusterv1.MachineControlPlaneLabel] return ok } // GetOwnerCluster returns the Cluster object owning the current resource. -func GetOwnerCluster(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1beta1.Cluster, error) { +func GetOwnerCluster(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.Cluster, error) { for _, ref := range obj.GetOwnerReferences() { if ref.Kind != "Cluster" { continue @@ -45,7 +46,7 @@ func GetOwnerCluster(ctx context.Context, c client.Client, obj metav1.ObjectMeta if err != nil { return nil, errors.WithStack(err) } - if gv.Group == clusterv1beta1.GroupVersion.Group { + if gv.Group == clusterv1.GroupVersion.Group { return GetClusterByName(ctx, c, obj.Namespace, ref.Name) } } @@ -53,16 +54,16 @@ func GetOwnerCluster(ctx context.Context, c client.Client, obj metav1.ObjectMeta } // GetClusterFromMetadata returns the Cluster object (if present) using the object metadata. 
-func GetClusterFromMetadata(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1beta1.Cluster, error) { - if obj.Labels[clusterv1beta1.ClusterNameLabel] == "" { +func GetClusterFromMetadata(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.Cluster, error) { + if obj.Labels[clusterv1.ClusterNameLabel] == "" { return nil, errors.WithStack(util.ErrNoCluster) } - return GetClusterByName(ctx, c, obj.Namespace, obj.Labels[clusterv1beta1.ClusterNameLabel]) + return GetClusterByName(ctx, c, obj.Namespace, obj.Labels[clusterv1.ClusterNameLabel]) } // GetClusterByName finds and return a Cluster object using the specified params. -func GetClusterByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1beta1.Cluster, error) { - cluster := &clusterv1beta1.Cluster{} +func GetClusterByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.Cluster, error) { + cluster := &clusterv1.Cluster{} key := client.ObjectKey{ Namespace: namespace, Name: name, @@ -76,21 +77,21 @@ func GetClusterByName(ctx context.Context, c client.Client, namespace, name stri } // IsPaused returns true if the Cluster is paused or the object has the `paused` annotation. -func IsPaused(cluster *clusterv1beta1.Cluster, o metav1.Object) bool { - if cluster.Spec.Paused { +func IsPaused(cluster *clusterv1.Cluster, o metav1.Object) bool { + if ptr.Deref(cluster.Spec.Paused, false) { return true } return annotations.HasPaused(o) } // GetOwnerMachine returns the Machine object owning the current resource. -func GetOwnerMachine(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1beta1.Machine, error) { +func GetOwnerMachine(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.Machine, error) { for _, ref := range obj.GetOwnerReferences() { gv, err := schema.ParseGroupVersion(ref.APIVersion) if err != nil { return nil, err } - if ref.Kind == "Machine" && gv.Group == clusterv1beta1.GroupVersion.Group { + if ref.Kind == "Machine" && gv.Group == clusterv1.GroupVersion.Group { return GetMachineByName(ctx, c, obj.Namespace, ref.Name) } } @@ -98,8 +99,8 @@ func GetOwnerMachine(ctx context.Context, c client.Client, obj metav1.ObjectMeta } // GetMachineByName finds and return a Machine object using the specified params. 
-func GetMachineByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1beta1.Machine, error) { - m := &clusterv1beta1.Machine{} +func GetMachineByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.Machine, error) { + m := &clusterv1.Machine{} key := client.ObjectKey{Name: name, Namespace: namespace} if err := c.Get(ctx, key, m); err != nil { return nil, err From eb0efe6dec462f7710ea59606e02088b752b3a6f Mon Sep 17 00:00:00 2001 From: Christian Schlotter Date: Mon, 10 Nov 2025 17:46:44 +0100 Subject: [PATCH 4/5] Replace usage of pkg/capiutils with upstream packages --- cloud/scope/cluster.go | 2 +- cloud/scope/managedcluster.go | 2 +- controllers/gcpcluster_controller.go | 3 +- controllers/gcpmachine_controller.go | 12 +- .../gcpmanagedcluster_controller.go | 8 +- .../gcpmanagedcontrolplane_controller.go | 8 +- .../gcpmanagedmachinepool_controller.go | 8 +- pkg/capiutils/predicates.go | 126 ------------------ pkg/capiutils/utils.go | 109 --------------- 9 files changed, 21 insertions(+), 257 deletions(-) delete mode 100644 pkg/capiutils/predicates.go delete mode 100644 pkg/capiutils/utils.go diff --git a/cloud/scope/cluster.go b/cloud/scope/cluster.go index 287f09461..59b7c291f 100644 --- a/cloud/scope/cluster.go +++ b/cloud/scope/cluster.go @@ -200,7 +200,7 @@ func (s *ClusterScope) ControlPlaneEndpoint() clusterv1.APIEndpoint { // FailureDomains returns the cluster failure domains. func (s *ClusterScope) FailureDomains() []string { failureDomains := []string{} - for failureDomainName, _ := range s.GCPCluster.Status.FailureDomains { + for failureDomainName := range s.GCPCluster.Status.FailureDomains { failureDomains = append(failureDomains, failureDomainName) } return failureDomains diff --git a/cloud/scope/managedcluster.go b/cloud/scope/managedcluster.go index 03d30bf18..03de2c201 100644 --- a/cloud/scope/managedcluster.go +++ b/cloud/scope/managedcluster.go @@ -187,7 +187,7 @@ func (s *ManagedClusterScope) ControlPlaneEndpoint() clusterv1.APIEndpoint { // FailureDomains returns the cluster failure domains. func (s *ManagedClusterScope) FailureDomains() []string { failureDomains := []string{} - for failureDomainName, _ := range s.GCPManagedCluster.Status.FailureDomains { + for failureDomainName := range s.GCPManagedCluster.Status.FailureDomains { failureDomains = append(failureDomains, failureDomainName) } return failureDomains diff --git a/controllers/gcpcluster_controller.go b/controllers/gcpcluster_controller.go index 083edbf2f..e84281a5c 100644 --- a/controllers/gcpcluster_controller.go +++ b/controllers/gcpcluster_controller.go @@ -31,7 +31,6 @@ import ( "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute/loadbalancers" "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute/networks" "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute/subnets" - "sigs.k8s.io/cluster-api-provider-gcp/pkg/capiutils" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" @@ -131,7 +130,7 @@ func (r *GCPClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, nil } - if capiutils.IsPaused(cluster, gcpCluster) { + if annotations.IsPaused(cluster, gcpCluster) { log.Info("GCPCluster of linked Cluster is marked as paused. 
Won't reconcile") return ctrl.Result{}, nil } diff --git a/controllers/gcpmachine_controller.go b/controllers/gcpmachine_controller.go index c318fedd6..d08a45ad7 100644 --- a/controllers/gcpmachine_controller.go +++ b/controllers/gcpmachine_controller.go @@ -26,10 +26,10 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute/instances" - "sigs.k8s.io/cluster-api-provider-gcp/pkg/capiutils" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/predicates" "sigs.k8s.io/cluster-api/util/record" ctrl "sigs.k8s.io/controller-runtime" @@ -82,7 +82,7 @@ func (r *GCPMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma if err := c.Watch( source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(clusterToObjectFunc), - capiutils.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), log), )); err != nil { return errors.Wrap(err, "failed adding a watch for ready clusters") } @@ -103,7 +103,7 @@ func (r *GCPMachineReconciler) GCPClusterToGCPMachines(ctx context.Context) hand return nil } - cluster, err := capiutils.GetOwnerCluster(mapCtx, r.Client, c.ObjectMeta) + cluster, err := util.GetOwnerCluster(mapCtx, r.Client, c.ObjectMeta) switch { case apierrors.IsNotFound(err) || cluster == nil: return result @@ -145,7 +145,7 @@ func (r *GCPMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, err } - machine, err := capiutils.GetOwnerMachine(ctx, r.Client, gcpMachine.ObjectMeta) + machine, err := util.GetOwnerMachine(ctx, r.Client, gcpMachine.ObjectMeta) if err != nil { return ctrl.Result{}, err } @@ -155,14 +155,14 @@ func (r *GCPMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) } log = log.WithValues("machine", machine.Name) - cluster, err := capiutils.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta) + cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta) if err != nil { log.Info("Machine is missing cluster label or cluster does not exist") return ctrl.Result{}, nil } - if capiutils.IsPaused(cluster, gcpMachine) { + if annotations.IsPaused(cluster, gcpMachine) { log.Info("GCPMachine or linked Cluster is marked as paused. 
Won't reconcile") return ctrl.Result{}, nil } diff --git a/exp/controllers/gcpmanagedcluster_controller.go b/exp/controllers/gcpmanagedcluster_controller.go index 45ee3ef47..8096b9fbb 100644 --- a/exp/controllers/gcpmanagedcluster_controller.go +++ b/exp/controllers/gcpmanagedcluster_controller.go @@ -32,11 +32,11 @@ import ( "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute/networks" "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute/subnets" infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-gcp/pkg/capiutils" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/predicates" "sigs.k8s.io/cluster-api/util/record" ctrl "sigs.k8s.io/controller-runtime" @@ -82,7 +82,7 @@ func (r *GCPManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Re } // Fetch the Cluster. - cluster, err := capiutils.GetOwnerCluster(ctx, r.Client, gcpCluster.ObjectMeta) + cluster, err := util.GetOwnerCluster(ctx, r.Client, gcpCluster.ObjectMeta) if err != nil { log.Error(err, "Failed to get owner cluster") return ctrl.Result{}, err @@ -92,7 +92,7 @@ func (r *GCPManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, nil } - if capiutils.IsPaused(cluster, gcpCluster) { + if annotations.IsPaused(cluster, gcpCluster) { log.Info("GCPManagedCluster or linked Cluster is marked as paused. Won't reconcile") return ctrl.Result{}, nil } @@ -284,7 +284,7 @@ func (r *GCPManagedClusterReconciler) managedControlPlaneMapper() handler.MapFun return nil } - cluster, err := capiutils.GetOwnerCluster(ctx, r.Client, gcpManagedControlPlane.ObjectMeta) + cluster, err := util.GetOwnerCluster(ctx, r.Client, gcpManagedControlPlane.ObjectMeta) if err != nil { log.Error(err, "failed to get owning cluster") return nil diff --git a/exp/controllers/gcpmanagedcontrolplane_controller.go b/exp/controllers/gcpmanagedcontrolplane_controller.go index 28e30fed6..5fd6b68c7 100644 --- a/exp/controllers/gcpmanagedcontrolplane_controller.go +++ b/exp/controllers/gcpmanagedcontrolplane_controller.go @@ -27,10 +27,10 @@ import ( "sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/container/clusters" infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-gcp/pkg/capiutils" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/annotations" v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/predicates" "sigs.k8s.io/cluster-api/util/record" @@ -75,7 +75,7 @@ func (r *GCPManagedControlPlaneReconciler) SetupWithManager(ctx context.Context, if err = c.Watch( source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, gcpManagedControlPlane.GroupVersionKind(), mgr.GetClient(), &infrav1exp.GCPManagedControlPlane{})), - capiutils.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), log), )); err != nil { return fmt.Errorf("failed adding a watch for ready clusters: %w", err) } @@ -99,7 +99,7 @@ 
func (r *GCPManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ct } // Get the cluster - cluster, err := capiutils.GetOwnerCluster(ctx, r.Client, gcpManagedControlPlane.ObjectMeta) + cluster, err := util.GetOwnerCluster(ctx, r.Client, gcpManagedControlPlane.ObjectMeta) if err != nil { log.Error(err, "Failed to retrieve owner Cluster from the API Server") return ctrl.Result{}, err @@ -109,7 +109,7 @@ func (r *GCPManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ct return ctrl.Result{}, nil } - if capiutils.IsPaused(cluster, gcpManagedControlPlane) { + if annotations.IsPaused(cluster, gcpManagedControlPlane) { log.Info("Reconciliation is paused for this object") return ctrl.Result{}, nil } diff --git a/exp/controllers/gcpmanagedmachinepool_controller.go b/exp/controllers/gcpmanagedmachinepool_controller.go index 9dce03983..4f81f85c9 100644 --- a/exp/controllers/gcpmanagedmachinepool_controller.go +++ b/exp/controllers/gcpmanagedmachinepool_controller.go @@ -30,7 +30,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/cluster-api-provider-gcp/cloud" "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/container/nodepools" - "sigs.k8s.io/cluster-api-provider-gcp/pkg/capiutils" + "sigs.k8s.io/cluster-api/util/annotations" v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/record" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" @@ -181,7 +181,7 @@ func (r *GCPManagedMachinePoolReconciler) SetupWithManager(ctx context.Context, if err := c.Watch( source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(clusterToObjectFunc), - capiutils.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), log), )); err != nil { return errors.Wrap(err, "failed adding a watch for ready clusters") } @@ -252,12 +252,12 @@ func (r *GCPManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctr } // Get the cluster - cluster, err := capiutils.GetClusterFromMetadata(ctx, r.Client, machinePool.ObjectMeta) + cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machinePool.ObjectMeta) if err != nil { log.Info("Failed to retrieve Cluster from MachinePool") return ctrl.Result{}, err } - if capiutils.IsPaused(cluster, gcpManagedMachinePool) { + if annotations.IsPaused(cluster, gcpManagedMachinePool) { log.Info("Reconciliation is paused for this object") return ctrl.Result{}, nil } diff --git a/pkg/capiutils/predicates.go b/pkg/capiutils/predicates.go deleted file mode 100644 index 73436a89f..000000000 --- a/pkg/capiutils/predicates.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2025 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package capiutils contains utility functions for working with Cluster API resources. 
-// These have mostly been inlined as part of the CAPI 1.10 -> 1.11 upgrade, -// and should be removed when we switch to reading CAPI v1beta2 objects. -// -// Deprecated: This package is deprecated and is going to be removed when support for v1beta1 will be dropped. -package capiutils - -import ( - "fmt" - - "github.com/go-logr/logr" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/klog/v2" - "k8s.io/utils/ptr" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/predicate" - - "sigs.k8s.io/cluster-api/util/predicates" - - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" -) - -// ClusterUpdateInfraReady returns a predicate that returns true for an update event when a cluster has Status.InfrastructureReady changed from false to true -// it also returns true if the resource provided is not a Cluster to allow for use with controller-runtime NewControllerManagedBy. -func ClusterUpdateInfraReady(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { - return predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - log := logger.WithValues("predicate", "ClusterUpdateInfraReady", "eventType", "update") - if gvk, err := apiutil.GVKForObject(e.ObjectOld, scheme); err == nil { - log = log.WithValues(gvk.Kind, klog.KObj(e.ObjectOld)) - } - - oldCluster, ok := e.ObjectOld.(*clusterv1.Cluster) - if !ok { - log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) - return false - } - - newCluster := e.ObjectNew.(*clusterv1.Cluster) - - if !ptr.Deref(oldCluster.Status.Initialization.InfrastructureProvisioned, false) && ptr.Deref(newCluster.Status.Initialization.InfrastructureProvisioned, false) { - log.V(6).Info("Cluster infrastructure became ready, allowing further processing") - return true - } - - log.V(4).Info("Cluster infrastructure did not become ready, blocking further processing") - return false - }, - CreateFunc: func(event.CreateEvent) bool { return false }, - DeleteFunc: func(event.DeleteEvent) bool { return false }, - GenericFunc: func(event.GenericEvent) bool { return false }, - } -} - -// ClusterPausedTransitions returns a predicate that returns true for an update event when a cluster has Spec.Paused changed. -// FIXME(chrischdi): use new predicates from CAPI -func ClusterPausedTransitions(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { - return predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - log := logger.WithValues("predicate", "ClusterPausedTransitions", "eventType", "update") - if gvk, err := apiutil.GVKForObject(e.ObjectOld, scheme); err == nil { - log = log.WithValues(gvk.Kind, klog.KObj(e.ObjectOld)) - } - - oldCluster, ok := e.ObjectOld.(*clusterv1.Cluster) - if !ok { - log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) - return false - } - - newCluster := e.ObjectNew.(*clusterv1.Cluster) - - if ptr.Deref(oldCluster.Spec.Paused, false) && !ptr.Deref(newCluster.Spec.Paused, false) { - log.V(6).Info("Cluster unpausing, allowing further processing") - return true - } - - if !ptr.Deref(oldCluster.Spec.Paused, false) && ptr.Deref(newCluster.Spec.Paused, false) { - log.V(6).Info("Cluster pausing, allowing further processing") - return true - } - - // This predicate always work in "or" with Paused predicates - // so the logs are adjusted to not provide false negatives/verbosity at V<=5. 
- log.V(6).Info("Cluster paused state was not changed, blocking further processing") - return false - }, - CreateFunc: func(event.CreateEvent) bool { return false }, - DeleteFunc: func(event.DeleteEvent) bool { return false }, - GenericFunc: func(event.GenericEvent) bool { return false }, - } -} - -// ClusterPausedTransitionsOrInfrastructureReady returns a Predicate that returns true on Cluster Update events where -// either Cluster.Spec.Paused transitions or Cluster.Status.InfrastructureReady transitions to true. -// This implements a common requirement for some cluster-api and provider controllers (such as Machine Infrastructure -// controllers) to resume reconciliation when the Cluster gets paused or unpaused and when the infrastructure becomes ready. -// Example use: -// -// err := controller.Watch( -// source.Kind(cache, &clusterv1.Cluster{}), -// handler.EnqueueRequestsFromMapFunc(clusterToMachines) -// predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), r.Log), -// ) -func ClusterPausedTransitionsOrInfrastructureReady(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { - log := logger.WithValues("predicate", "ClusterPausedTransitionsOrInfrastructureReady") - - return predicates.Any(scheme, log, ClusterPausedTransitions(scheme, log), ClusterUpdateInfraReady(scheme, log)) -} diff --git a/pkg/capiutils/utils.go b/pkg/capiutils/utils.go deleted file mode 100644 index f27d31020..000000000 --- a/pkg/capiutils/utils.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2025 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package capiutils - -import ( - "context" - - "github.com/pkg/errors" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/utils/ptr" - "sigs.k8s.io/controller-runtime/pkg/client" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" - "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/annotations" -) - -// IsControlPlaneMachine checks machine is a control plane node. -func IsControlPlaneMachine(machine *clusterv1.Machine) bool { - _, ok := machine.Labels[clusterv1.MachineControlPlaneLabel] - return ok -} - -// GetOwnerCluster returns the Cluster object owning the current resource. -func GetOwnerCluster(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.Cluster, error) { - for _, ref := range obj.GetOwnerReferences() { - if ref.Kind != "Cluster" { - continue - } - gv, err := schema.ParseGroupVersion(ref.APIVersion) - if err != nil { - return nil, errors.WithStack(err) - } - if gv.Group == clusterv1.GroupVersion.Group { - return GetClusterByName(ctx, c, obj.Namespace, ref.Name) - } - } - return nil, nil -} - -// GetClusterFromMetadata returns the Cluster object (if present) using the object metadata. 
-func GetClusterFromMetadata(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.Cluster, error) { - if obj.Labels[clusterv1.ClusterNameLabel] == "" { - return nil, errors.WithStack(util.ErrNoCluster) - } - return GetClusterByName(ctx, c, obj.Namespace, obj.Labels[clusterv1.ClusterNameLabel]) -} - -// GetClusterByName finds and return a Cluster object using the specified params. -func GetClusterByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.Cluster, error) { - cluster := &clusterv1.Cluster{} - key := client.ObjectKey{ - Namespace: namespace, - Name: name, - } - - if err := c.Get(ctx, key, cluster); err != nil { - return nil, errors.Wrapf(err, "failed to get Cluster/%s", name) - } - - return cluster, nil -} - -// IsPaused returns true if the Cluster is paused or the object has the `paused` annotation. -func IsPaused(cluster *clusterv1.Cluster, o metav1.Object) bool { - if ptr.Deref(cluster.Spec.Paused, false) { - return true - } - return annotations.HasPaused(o) -} - -// GetOwnerMachine returns the Machine object owning the current resource. -func GetOwnerMachine(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.Machine, error) { - for _, ref := range obj.GetOwnerReferences() { - gv, err := schema.ParseGroupVersion(ref.APIVersion) - if err != nil { - return nil, err - } - if ref.Kind == "Machine" && gv.Group == clusterv1.GroupVersion.Group { - return GetMachineByName(ctx, c, obj.Namespace, ref.Name) - } - } - return nil, nil -} - -// GetMachineByName finds and return a Machine object using the specified params. -func GetMachineByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.Machine, error) { - m := &clusterv1.Machine{} - key := client.ObjectKey{Name: name, Namespace: namespace} - if err := c.Get(ctx, key, m); err != nil { - return nil, err - } - return m, nil -} From 27c3e2c4a0fc2cb4a2155825dd27ba1044482b43 Mon Sep 17 00:00:00 2001 From: Christian Schlotter Date: Tue, 11 Nov 2025 14:43:08 +0100 Subject: [PATCH 5/5] fix upgrades test to have CCM --- .../cluster-template-upgrades.yaml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/test/e2e/data/infrastructure-gcp/cluster-template-upgrades.yaml b/test/e2e/data/infrastructure-gcp/cluster-template-upgrades.yaml index abaf5dfe5..63943fd87 100644 --- a/test/e2e/data/infrastructure-gcp/cluster-template-upgrades.yaml +++ b/test/e2e/data/infrastructure-gcp/cluster-template-upgrades.yaml @@ -154,3 +154,22 @@ spec: spec: instanceType: "${GCP_NODE_MACHINE_TYPE}" image: "${KUBERNETES_IMAGE_UPGRADE_TO}" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: "${CLUSTER_NAME}-crs-ccm" +data: ${CCM_RESOURCES} +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: "${CLUSTER_NAME}-crs-ccm" +spec: + strategy: ApplyOnce + clusterSelector: + matchLabels: + ccm: "${CLUSTER_NAME}-crs-ccm" + resources: + - name: "${CLUSTER_NAME}-crs-ccm" + kind: ConfigMap