diff --git a/.golangci.yml b/.golangci.yml
index 29f85ed5e..45be0534c 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -77,6 +77,14 @@ linters:
         alias: kerrors
       - pkg: sigs.k8s.io/controller-runtime
         alias: ctrl
+      - pkg: "sigs.k8s.io/cluster-api/api/core/v1beta2"
+        alias: clusterv1
+      - pkg: "sigs.k8s.io/cluster-api/api/core/v1beta1"
+        alias: clusterv1beta1
+      - pkg: "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
+        alias: v1beta1patch
+      - pkg: "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+        alias: v1beta1conditions
       no-unaliased: true
   exclusions:
     generated: lax
diff --git a/api/v1beta1/gcpcluster_types.go b/api/v1beta1/gcpcluster_types.go
index f9728fc91..88d930530 100644
--- a/api/v1beta1/gcpcluster_types.go
+++ b/api/v1beta1/gcpcluster_types.go
@@ -18,7 +18,7 @@ package v1beta1
 
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 const (
@@ -37,7 +37,7 @@ type GCPClusterSpec struct {
 
 	// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.
 	// +optional
-	ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"`
+	ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"`
 
 	// NetworkSpec encapsulates all things related to GCP network.
 	// +optional
@@ -77,8 +77,8 @@ type GCPClusterSpec struct {
 
 // GCPClusterStatus defines the observed state of GCPCluster.
 type GCPClusterStatus struct {
-	FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`
-	Network        Network                  `json:"network,omitempty"`
+	FailureDomains clusterv1beta1.FailureDomains `json:"failureDomains,omitempty"`
+	Network        Network                       `json:"network,omitempty"`
 	// Bastion Instance `json:"bastion,omitempty"`
 	Ready bool `json:"ready"`
diff --git a/api/v1beta1/gcpclustertemplate_types.go b/api/v1beta1/gcpclustertemplate_types.go
index c1c325a12..4f8a3dc2b 100644
--- a/api/v1beta1/gcpclustertemplate_types.go
+++ b/api/v1beta1/gcpclustertemplate_types.go
@@ -18,7 +18,7 @@ package v1beta1
 
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 // GCPClusterTemplateSpec defines the desired state of GCPClusterTemplate.
@@ -31,7 +31,7 @@ type GCPClusterTemplateResource struct {
 	// Standard object's metadata.
 	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
 	// +optional
-	ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"`
+	ObjectMeta clusterv1beta1.ObjectMeta `json:"metadata,omitempty"`
 
 	Spec GCPClusterSpec `json:"spec"`
 }
diff --git a/api/v1beta1/types.go b/api/v1beta1/types.go
index 27019aff9..4d94c8f2c 100644
--- a/api/v1beta1/types.go
+++ b/api/v1beta1/types.go
@@ -19,7 +19,7 @@ package v1beta1
 
 import (
 	"fmt"
 
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 // GCPMachineTemplateResource describes the data needed to create am GCPMachine from a template.
@@ -27,7 +27,7 @@ type GCPMachineTemplateResource struct {
 	// Standard object's metadata.
 	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
 	// +optional
-	ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"`
+	ObjectMeta clusterv1beta1.ObjectMeta `json:"metadata,omitempty"`
 
 	// Spec is the specification of the desired behavior of the machine.
 	Spec GCPMachineSpec `json:"spec"`
diff --git a/cloud/interfaces.go b/cloud/interfaces.go
index c7cf79021..58aeda69d 100644
--- a/cloud/interfaces.go
+++ b/cloud/interfaces.go
@@ -19,12 +19,12 @@ package cloud
 import (
 	"context"
 
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	ctrl "sigs.k8s.io/controller-runtime"
 
 	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud"
 	corev1 "k8s.io/api/core/v1"
 	infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 // Cloud alias for cloud.Cloud interface.
@@ -61,7 +61,7 @@ type ClusterGetter interface {
 	SkipFirewallRuleCreation() bool
 	Network() *infrav1.Network
 	AdditionalLabels() infrav1.Labels
-	FailureDomains() clusterv1.FailureDomains
+	FailureDomains() []string
 	ControlPlaneEndpoint() clusterv1.APIEndpoint
 	ResourceManagerTags() infrav1.ResourceManagerTags
 	LoadBalancer() infrav1.LoadBalancerSpec
diff --git a/cloud/scope/cluster.go b/cloud/scope/cluster.go
index 6fc3ef4c8..59b7c291f 100644
--- a/cloud/scope/cluster.go
+++ b/cloud/scope/cluster.go
@@ -27,7 +27,8 @@ import (
 	"k8s.io/utils/ptr"
 	infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-gcp/cloud"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
@@ -185,17 +186,24 @@ func (s *ClusterScope) ResourceManagerTags() infrav1.ResourceManagerTags {
 
 // ControlPlaneEndpoint returns the cluster control-plane endpoint.
 func (s *ClusterScope) ControlPlaneEndpoint() clusterv1.APIEndpoint {
-	endpoint := s.GCPCluster.Spec.ControlPlaneEndpoint
-	endpoint.Port = 443
-	if c := s.Cluster.Spec.ClusterNetwork; c != nil {
-		endpoint.Port = ptr.Deref(c.APIServerPort, 443)
+	endpoint := clusterv1.APIEndpoint{
+		Host: s.GCPCluster.Spec.ControlPlaneEndpoint.Host,
+		Port: 443,
+	}
+
+	if s.Cluster.Spec.ClusterNetwork.APIServerPort != 0 {
+		endpoint.Port = s.Cluster.Spec.ClusterNetwork.APIServerPort
 	}
 	return endpoint
 }
 
 // FailureDomains returns the cluster failure domains.
-func (s *ClusterScope) FailureDomains() clusterv1.FailureDomains {
-	return s.GCPCluster.Status.FailureDomains
+func (s *ClusterScope) FailureDomains() []string {
+	failureDomains := []string{}
+	for failureDomainName := range s.GCPCluster.Status.FailureDomains {
+		failureDomains = append(failureDomains, failureDomainName)
+	}
+	return failureDomains
 }
 
 // ANCHOR_END: ClusterGetter
@@ -208,13 +216,16 @@ func (s *ClusterScope) SetReady() {
 }
 
 // SetFailureDomains sets cluster failure domains.
-func (s *ClusterScope) SetFailureDomains(fd clusterv1.FailureDomains) {
+func (s *ClusterScope) SetFailureDomains(fd clusterv1beta1.FailureDomains) {
 	s.GCPCluster.Status.FailureDomains = fd
 }
 
 // SetControlPlaneEndpoint sets cluster control-plane endpoint.
 func (s *ClusterScope) SetControlPlaneEndpoint(endpoint clusterv1.APIEndpoint) {
-	s.GCPCluster.Spec.ControlPlaneEndpoint = endpoint
+	s.GCPCluster.Spec.ControlPlaneEndpoint = clusterv1beta1.APIEndpoint{
+		Host: endpoint.Host,
+		Port: endpoint.Port,
+	}
 }
 
 // ANCHOR_END: ClusterSetter
@@ -354,8 +365,8 @@ func (s *ClusterScope) BackendServiceSpec(lbname string) *compute.BackendService
 
 // ForwardingRuleSpec returns google compute forwarding-rule spec.
 func (s *ClusterScope) ForwardingRuleSpec(lbname string) *compute.ForwardingRule {
 	port := int32(443)
-	if c := s.Cluster.Spec.ClusterNetwork; c != nil {
-		port = ptr.Deref(c.APIServerPort, 443)
+	if s.Cluster.Spec.ClusterNetwork.APIServerPort != 0 {
+		port = s.Cluster.Spec.ClusterNetwork.APIServerPort
 	}
 	portRange := fmt.Sprintf("%d-%d", port, port)
 	return &compute.ForwardingRule{
diff --git a/cloud/scope/machine.go b/cloud/scope/machine.go
index c4bc0bfe6..9c29d0ae1 100644
--- a/cloud/scope/machine.go
+++ b/cloud/scope/machine.go
@@ -36,7 +36,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-gcp/cloud"
 	"sigs.k8s.io/cluster-api-provider-gcp/cloud/providerid"
 	"sigs.k8s.io/cluster-api-provider-gcp/cloud/services/shared"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
@@ -99,19 +99,15 @@ func (m *MachineScope) NetworkCloud() cloud.Cloud {
 
 // Zone returns the FailureDomain for the GCPMachine.
 func (m *MachineScope) Zone() string {
-	if m.Machine.Spec.FailureDomain == nil {
+	if m.Machine.Spec.FailureDomain == "" {
 		fd := m.ClusterGetter.FailureDomains()
 		if len(fd) == 0 {
 			return ""
 		}
-		zones := make([]string, 0, len(fd))
-		for zone := range fd {
-			zones = append(zones, zone)
-		}
-		sort.Strings(zones)
-		return zones[0]
+		sort.Strings(fd)
+		return fd[0]
 	}
-	return *m.Machine.Spec.FailureDomain
+	return m.Machine.Spec.FailureDomain
 }
 
 // Project return the project for the GCPMachine's cluster.
@@ -228,10 +224,7 @@ func (m *MachineScope) SetAddresses(addressList []corev1.NodeAddress) {
 
 // InstanceImageSpec returns compute instance image attched-disk spec.
 func (m *MachineScope) InstanceImageSpec() *compute.AttachedDisk {
-	version := ""
-	if m.Machine.Spec.Version != nil {
-		version = *m.Machine.Spec.Version
-	}
+	version := m.Machine.Spec.Version
 	image := "capi-ubuntu-1804-k8s-" + strings.ReplaceAll(semver.MajorMinor(version), ".", "-")
 	sourceImage := path.Join("projects", m.ClusterGetter.Project(), "global", "images", "family", image)
 	if m.GCPMachine.Spec.Image != nil {
diff --git a/cloud/scope/machine_test.go b/cloud/scope/machine_test.go
index 96d6c4e65..05bc3b175 100644
--- a/cloud/scope/machine_test.go
+++ b/cloud/scope/machine_test.go
@@ -6,7 +6,7 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 )
 
@@ -30,7 +30,7 @@ func TestMachineLocalSSDDiskType(t *testing.T) {
 	failureDomain := "example.com"
 	testMachine := clusterv1.Machine{
 		Spec: clusterv1.MachineSpec{
-			FailureDomain: &failureDomain,
+			FailureDomain: failureDomain,
 		},
 	}
 
@@ -89,7 +89,7 @@ func TestInstanceNetworkInterfaceAliasIPRangesSpec(t *testing.T) {
 	failureDomain := "us-central1-a"
 	testMachine := clusterv1.Machine{
 		Spec: clusterv1.MachineSpec{
-			FailureDomain: &failureDomain,
+			FailureDomain: failureDomain,
 		},
 	}
 
diff --git a/cloud/scope/managedcluster.go b/cloud/scope/managedcluster.go
index b3095374c..03de2c201 100644
--- a/cloud/scope/managedcluster.go
+++ b/cloud/scope/managedcluster.go
@@ -27,7 +27,8 @@ import (
 	infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-gcp/cloud"
 	infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
@@ -173,14 +174,23 @@ func (s *ManagedClusterScope) ResourceManagerTags() infrav1.ResourceManagerTags
 
 // ControlPlaneEndpoint returns the cluster control-plane endpoint.
 func (s *ManagedClusterScope) ControlPlaneEndpoint() clusterv1.APIEndpoint {
-	endpoint := s.GCPManagedCluster.Spec.ControlPlaneEndpoint
-	endpoint.Port = ptr.Deref(s.Cluster.Spec.ClusterNetwork.APIServerPort, 443)
+	endpoint := clusterv1.APIEndpoint{
+		Host: s.GCPManagedCluster.Spec.ControlPlaneEndpoint.Host,
+		Port: 443,
+	}
+	if s.Cluster.Spec.ClusterNetwork.APIServerPort != 0 {
+		endpoint.Port = s.Cluster.Spec.ClusterNetwork.APIServerPort
+	}
 	return endpoint
 }
 
 // FailureDomains returns the cluster failure domains.
-func (s *ManagedClusterScope) FailureDomains() clusterv1.FailureDomains {
-	return s.GCPManagedCluster.Status.FailureDomains
+func (s *ManagedClusterScope) FailureDomains() []string {
+	failureDomains := []string{}
+	for failureDomainName := range s.GCPManagedCluster.Status.FailureDomains {
+		failureDomains = append(failureDomains, failureDomainName)
+	}
+	return failureDomains
 }
 
 // ANCHOR_END: ClusterGetter
@@ -193,13 +203,16 @@ func (s *ManagedClusterScope) SetReady() {
 }
 
 // SetFailureDomains sets cluster failure domains.
-func (s *ManagedClusterScope) SetFailureDomains(fd clusterv1.FailureDomains) {
+func (s *ManagedClusterScope) SetFailureDomains(fd clusterv1beta1.FailureDomains) {
 	s.GCPManagedCluster.Status.FailureDomains = fd
 }
 
 // SetControlPlaneEndpoint sets cluster control-plane endpoint.
 func (s *ManagedClusterScope) SetControlPlaneEndpoint(endpoint clusterv1.APIEndpoint) {
-	s.GCPManagedCluster.Spec.ControlPlaneEndpoint = endpoint
+	s.GCPManagedCluster.Spec.ControlPlaneEndpoint = clusterv1beta1.APIEndpoint{
+		Host: endpoint.Host,
+		Port: endpoint.Port,
+	}
 }
 
 // ANCHOR_END: ClusterSetter
diff --git a/cloud/scope/managedcontrolplane.go b/cloud/scope/managedcontrolplane.go
index b168bab0c..5833694d6 100644
--- a/cloud/scope/managedcontrolplane.go
+++ b/cloud/scope/managedcontrolplane.go
@@ -22,15 +22,16 @@ import (
 
 	"sigs.k8s.io/cluster-api-provider-gcp/util/location"
 
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 
 	container "cloud.google.com/go/container/apiv1"
 	credentials "cloud.google.com/go/iam/credentials/apiv1"
 	resourcemanager "cloud.google.com/go/resourcemanager/apiv3"
 	"github.com/pkg/errors"
 	infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
-	patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+	v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
@@ -91,7 +92,7 @@ func NewManagedControlPlaneScope(ctx context.Context, params ManagedControlPlane
 		params.CredentialsClient = credentialsClient
 	}
 
-	helper, err := patch.NewHelper(params.GCPManagedControlPlane, params.Client)
+	helper, err := v1beta1patch.NewHelper(params.GCPManagedControlPlane, params.Client)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to init patch helper")
 	}
@@ -112,7 +113,7 @@ func NewManagedControlPlaneScope(ctx context.Context, params ManagedControlPlane
 // ManagedControlPlaneScope defines the basic context for an actuator to operate upon.
 type ManagedControlPlaneScope struct {
 	client      client.Client
-	patchHelper *patch.Helper
+	patchHelper *v1beta1patch.Helper
 
 	Cluster           *clusterv1.Cluster
 	GCPManagedCluster *infrav1exp.GCPManagedCluster
@@ -131,7 +132,7 @@ func (s *ManagedControlPlaneScope) PatchObject() error {
 	return s.patchHelper.Patch(
 		context.TODO(),
 		s.GCPManagedControlPlane,
-		patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
+		v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{
 			infrav1exp.GKEControlPlaneReadyCondition,
 			infrav1exp.GKEControlPlaneCreatingCondition,
 			infrav1exp.GKEControlPlaneUpdatingCondition,
@@ -148,7 +149,7 @@ func (s *ManagedControlPlaneScope) Close() error {
 }
 
 // ConditionSetter return a condition setter (which is GCPManagedControlPlane itself).
-func (s *ManagedControlPlaneScope) ConditionSetter() conditions.Setter {
+func (s *ManagedControlPlaneScope) ConditionSetter() v1beta1conditions.Setter {
 	return s.GCPManagedControlPlane
 }
 
@@ -226,7 +227,7 @@ func (s *ManagedControlPlaneScope) ClusterName() string {
 
 // SetEndpoint sets the Endpoint of GCPManagedControlPlane.
 func (s *ManagedControlPlaneScope) SetEndpoint(host string) {
-	s.GCPManagedControlPlane.Spec.Endpoint = clusterv1.APIEndpoint{
+	s.GCPManagedControlPlane.Spec.Endpoint = clusterv1beta1.APIEndpoint{
 		Host: host,
 		Port: APIServerPort,
 	}
diff --git a/cloud/scope/managedmachinepool.go b/cloud/scope/managedmachinepool.go
index 2fdd46eba..c08f20c51 100644
--- a/cloud/scope/managedmachinepool.go
+++ b/cloud/scope/managedmachinepool.go
@@ -26,15 +26,16 @@ import (
 	"sigs.k8s.io/cluster-api-provider-gcp/cloud"
 	"sigs.k8s.io/cluster-api-provider-gcp/util/location"
 
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 
 	compute "cloud.google.com/go/compute/apiv1"
 	container "cloud.google.com/go/container/apiv1"
 	"cloud.google.com/go/container/apiv1/containerpb"
 	"github.com/pkg/errors"
 	infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
-	patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+	v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
@@ -84,7 +85,7 @@ func NewManagedMachinePoolScope(ctx context.Context, params ManagedMachinePoolSc
 		params.InstanceGroupManagersClient = instanceGroupManagersClient
 	}
 
-	helper, err := patch.NewHelper(params.GCPManagedMachinePool, params.Client)
+	helper, err := v1beta1patch.NewHelper(params.GCPManagedMachinePool, params.Client)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to init patch helper")
 	}
@@ -104,7 +105,7 @@ func NewManagedMachinePoolScope(ctx context.Context, params ManagedMachinePoolSc
 // ManagedMachinePoolScope defines the basic context for an actuator to operate upon.
 type ManagedMachinePoolScope struct {
 	client      client.Client
-	patchHelper *patch.Helper
+	patchHelper *v1beta1patch.Helper
 
 	Cluster     *clusterv1.Cluster
 	MachinePool *clusterv1.MachinePool
@@ -120,7 +121,7 @@ func (s *ManagedMachinePoolScope) PatchObject() error {
 	return s.patchHelper.Patch(
 		context.TODO(),
 		s.GCPManagedMachinePool,
-		patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
+		v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{
 			infrav1exp.GKEMachinePoolReadyCondition,
 			infrav1exp.GKEMachinePoolCreatingCondition,
 			infrav1exp.GKEMachinePoolUpdatingCondition,
@@ -136,7 +137,7 @@ func (s *ManagedMachinePoolScope) Close() error {
 }
 
 // ConditionSetter return a condition setter (which is GCPManagedMachinePool itself).
-func (s *ManagedMachinePoolScope) ConditionSetter() conditions.Setter {
+func (s *ManagedMachinePoolScope) ConditionSetter() v1beta1conditions.Setter {
 	return s.GCPManagedMachinePool
 }
 
@@ -151,7 +152,7 @@ func (s *ManagedMachinePoolScope) InstanceGroupManagersClient() *compute.Instanc
 }
 
 // NodePoolVersion returns the k8s version of the node pool.
-func (s *ManagedMachinePoolScope) NodePoolVersion() *string {
+func (s *ManagedMachinePoolScope) NodePoolVersion() string {
 	return s.MachinePool.Spec.Template.Spec.Version
 }
 
@@ -268,8 +269,8 @@ func ConvertToSdkNodePool(nodePool infrav1exp.GCPManagedMachinePool, machinePool
 			Type: containerpb.SandboxConfig_GVISOR,
 		}
 	}
-	if machinePool.Spec.Template.Spec.Version != nil {
-		sdkNodePool.Version = strings.Replace(*machinePool.Spec.Template.Spec.Version, "v", "", 1)
+	if machinePool.Spec.Template.Spec.Version != "" {
+		sdkNodePool.Version = strings.Replace(machinePool.Spec.Template.Spec.Version, "v", "", 1)
 	}
 	return &sdkNodePool
 }
diff --git a/cloud/scope/managedmachinepool_test.go b/cloud/scope/managedmachinepool_test.go
index 8a08e5f25..765007a64 100644
--- a/cloud/scope/managedmachinepool_test.go
+++ b/cloud/scope/managedmachinepool_test.go
@@ -8,12 +8,12 @@ import (
 	infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-gcp/cloud"
 	"sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1"
-	clusterv1exp "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 )
 
 var (
 	TestGCPMMP      *v1beta1.GCPManagedMachinePool
-	TestMP          *clusterv1exp.MachinePool
+	TestMP          *clusterv1.MachinePool
 	TestClusterName string
 )
 
@@ -36,8 +36,8 @@ var _ = Describe("GCPManagedMachinePool Scope", func() {
 			},
 		},
 	}
-	TestMP = &clusterv1exp.MachinePool{
-		Spec: clusterv1exp.MachinePoolSpec{
+	TestMP = &clusterv1.MachinePool{
+		Spec: clusterv1.MachinePoolSpec{
 			Replicas: &replicas,
 		},
 	}
diff --git a/cloud/services/compute/firewalls/reconcile_test.go b/cloud/services/compute/firewalls/reconcile_test.go
index aa1b080af..ec19cc3c9 100644
--- a/cloud/services/compute/firewalls/reconcile_test.go
+++ b/cloud/services/compute/firewalls/reconcile_test.go
@@ -32,7 +32,7 @@ import (
 	"k8s.io/utils/ptr"
 	infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-gcp/cloud/scope"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 )
diff --git a/cloud/services/compute/instances/reconcile_test.go b/cloud/services/compute/instances/reconcile_test.go
index bb5404134..859a3701c 100644
--- a/cloud/services/compute/instances/reconcile_test.go
+++ b/cloud/services/compute/instances/reconcile_test.go
@@ -35,7 +35,8 @@ import (
 	infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-gcp/cloud/scope"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 )
 
@@ -71,8 +72,8 @@ var fakeMachine = &clusterv1.Machine{
 		Bootstrap: clusterv1.Bootstrap{
 			DataSecretName: ptr.To[string]("my-cluster-bootstrap"),
 		},
-		FailureDomain: ptr.To[string]("us-central1-c"),
-		Version:       ptr.To[string]("v1.19.11"),
+		FailureDomain: "us-central1-c",
+		Version:       "v1.19.11",
 	},
 }
 
@@ -85,7 +86,7 @@ var fakeMachineWithOutFailureDomain = &clusterv1.Machine{
 		Bootstrap: clusterv1.Bootstrap{
 			DataSecretName: ptr.To[string]("my-cluster-bootstrap"),
 		},
-		Version: ptr.To[string]("v1.19.11"),
+		Version: "v1.19.11",
 	},
 }
 
@@ -99,10 +100,10 @@ var fakeGCPClusterWithOutFailureDomain = &infrav1.GCPCluster{
 		Region: "us-central1",
 	},
 	Status: infrav1.GCPClusterStatus{
-		FailureDomains: clusterv1.FailureDomains{
-			"us-central1-a": clusterv1.FailureDomainSpec{ControlPlane: true},
-			"us-central1-b": clusterv1.FailureDomainSpec{ControlPlane: true},
-			"us-central1-c": clusterv1.FailureDomainSpec{ControlPlane: true},
+		FailureDomains: clusterv1beta1.FailureDomains{
+			"us-central1-a": clusterv1beta1.FailureDomainSpec{ControlPlane: true},
+			"us-central1-b": clusterv1beta1.FailureDomainSpec{ControlPlane: true},
+			"us-central1-c": clusterv1beta1.FailureDomainSpec{ControlPlane: true},
 		},
 	},
 }
diff --git a/cloud/services/compute/loadbalancers/reconcile.go b/cloud/services/compute/loadbalancers/reconcile.go
index 39fa72b1a..5ad8ad53e 100644
--- a/cloud/services/compute/loadbalancers/reconcile.go
+++ b/cloud/services/compute/loadbalancers/reconcile.go
@@ -254,11 +254,7 @@ func (s *Service) createInternalLoadBalancer(ctx context.Context, name string, l
 func (s *Service) createOrGetInstanceGroups(ctx context.Context) ([]*compute.InstanceGroup, error) {
 	log := log.FromContext(ctx)
-	fd := s.scope.FailureDomains()
-	zones := make([]string, 0, len(fd))
-	for zone := range fd {
-		zones = append(zones, zone)
-	}
+	zones := s.scope.FailureDomains()
 	groups := make([]*compute.InstanceGroup, 0, len(zones))
 	groupsMap := s.scope.Network().APIServerInstanceGroups
diff --git a/cloud/services/compute/loadbalancers/reconcile_test.go b/cloud/services/compute/loadbalancers/reconcile_test.go
index ae5db760c..d7f25c838 100644
--- a/cloud/services/compute/loadbalancers/reconcile_test.go
+++ b/cloud/services/compute/loadbalancers/reconcile_test.go
@@ -31,7 +31,8 @@ import (
 	"k8s.io/utils/ptr"
 	infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-gcp/cloud/scope"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 )
 
@@ -75,8 +76,8 @@ func getBaseClusterScope() (*scope.ClusterScope, error) {
 			},
 		},
 		Status: infrav1.GCPClusterStatus{
-			FailureDomains: clusterv1.FailureDomains{
-				"us-central1-a": clusterv1.FailureDomainSpec{ControlPlane: true},
+			FailureDomains: clusterv1beta1.FailureDomains{
+				"us-central1-a": clusterv1beta1.FailureDomainSpec{ControlPlane: true},
 			},
 		},
 	}
 
@@ -123,9 +124,8 @@ func getBaseClusterScopeWithPortSet() (*scope.ClusterScope, error) {
 		return nil, err
 	}
 
-	port := int32(6443)
-	clusterScope.Cluster.Spec.ClusterNetwork = &clusterv1.ClusterNetwork{
-		APIServerPort: &port,
+	clusterScope.Cluster.Spec.ClusterNetwork = clusterv1.ClusterNetwork{
+		APIServerPort: 6443,
 	}
 	return clusterScope, nil
 }
diff --git a/cloud/services/compute/networks/reconcile_test.go b/cloud/services/compute/networks/reconcile_test.go
index e0bcde88b..11b8187ef 100644
--- a/cloud/services/compute/networks/reconcile_test.go
+++ b/cloud/services/compute/networks/reconcile_test.go
@@ -31,7 +31,7 @@ import (
 	"k8s.io/utils/ptr"
 	infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-gcp/cloud/scope"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 )
diff --git a/cloud/services/compute/subnets/reconcile_test.go b/cloud/services/compute/subnets/reconcile_test.go
index 3d8030bf6..7b724093d 100644
--- a/cloud/services/compute/subnets/reconcile_test.go
+++ b/cloud/services/compute/subnets/reconcile_test.go
@@ -33,7 +33,7 @@ import (
 	infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-gcp/cloud/scope"
 
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 )
diff --git a/cloud/services/container/clusters/reconcile.go b/cloud/services/container/clusters/reconcile.go
index 8b1bdf577..3adcc40d7 100644
--- a/cloud/services/container/clusters/reconcile.go
+++ b/cloud/services/container/clusters/reconcile.go
@@ -33,8 +33,8 @@ import (
 	"google.golang.org/grpc/codes"
 	infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-gcp/util/reconciler"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 )
@@ -48,7 +48,7 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) {
 	if err != nil {
 		s.scope.GCPManagedControlPlane.Status.Initialized = false
 		s.scope.GCPManagedControlPlane.Status.Ready = false
-		conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, "describing cluster: %v", err)
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "describing cluster: %v", err)
 		return ctrl.Result{}, err
 	}
 	if cluster == nil {
@@ -58,40 +58,40 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) {
 
 	nodePools, _, err := s.scope.GetAllNodePools(ctx)
 	if err != nil {
-		conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, "fetching node pools: %v", err)
-		conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, "fetching node pools: %v", err)
-		conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, "fetching node pools: %v", err)
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "fetching node pools: %v", err)
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "fetching node pools: %v", err)
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "fetching node pools: %v", err)
 		return ctrl.Result{}, err
 	}
 
 	if s.scope.IsAutopilotCluster() {
 		if len(nodePools) > 0 {
 			log.Error(ErrAutopilotClusterMachinePoolsNotAllowed, fmt.Sprintf("%d machine pools defined", len(nodePools)))
-			conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1.ConditionSeverityInfo, "")
-			conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1.ConditionSeverityInfo, "")
-			conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1.ConditionSeverityInfo, "")
+			v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1beta1.ConditionSeverityInfo, "")
+			v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1beta1.ConditionSeverityInfo, "")
+			v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1beta1.ConditionSeverityInfo, "")
 			return ctrl.Result{}, ErrAutopilotClusterMachinePoolsNotAllowed
 		}
 	} else {
 		if len(nodePools) == 0 {
 			log.Info("At least 1 node pool is required to create GKE cluster with autopilot disabled")
-			conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1.ConditionSeverityInfo, "")
-			conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1.ConditionSeverityInfo, "")
-			conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1.ConditionSeverityInfo, "")
+			v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1beta1.ConditionSeverityInfo, "")
+			v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1beta1.ConditionSeverityInfo, "")
+			v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneRequiresAtLeastOneNodePoolReason, clusterv1beta1.ConditionSeverityInfo, "")
 			return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil
 		}
 	}
 
 	if err = s.createCluster(ctx, &log); err != nil {
 		log.Error(err, "failed creating cluster")
-		conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, "creating cluster: %v", err)
-		conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, "creating cluster: %v", err)
-		conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, "creating cluster: %v", err)
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "creating cluster: %v", err)
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "creating cluster: %v", err)
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "creating cluster: %v", err)
 		return ctrl.Result{}, err
 	}
 	log.Info("Cluster created provisioning in progress")
-	conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneCreatingReason, clusterv1.ConditionSeverityInfo, "")
-	conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneCreatingReason, clusterv1.ConditionSeverityInfo, "")
-	conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition)
+	v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEControlPlaneCreatingReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneCreatingReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition)
 	return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil
 }
@@ -103,23 +103,23 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) {
 	switch cluster.GetStatus() {
 	case containerpb.Cluster_PROVISIONING:
 		log.Info("Cluster provisioning in progress")
-		conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneCreatingReason, clusterv1.ConditionSeverityInfo, "")
-		conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneCreatingReason, clusterv1.ConditionSeverityInfo, "")
-		conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition)
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEControlPlaneCreatingReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneCreatingReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition)
 		s.scope.GCPManagedControlPlane.Status.Initialized = false
 		s.scope.GCPManagedControlPlane.Status.Ready = false
 		return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil
 	case containerpb.Cluster_RECONCILING:
 		log.Info("Cluster reconciling in progress")
-		conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneUpdatingCondition)
+		v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneUpdatingCondition)
 		s.scope.GCPManagedControlPlane.Status.Initialized = true
 		s.scope.GCPManagedControlPlane.Status.Ready = true
 		return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil
 	case containerpb.Cluster_STOPPING:
 		log.Info("Cluster stopping in progress")
-		conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1.ConditionSeverityInfo, "")
-		conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1.ConditionSeverityInfo, "")
-		conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition)
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition)
 		s.scope.GCPManagedControlPlane.Status.Initialized = false
 		s.scope.GCPManagedControlPlane.Status.Ready = false
 		return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil
@@ -129,7 +129,7 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) {
 			msg = cluster.GetConditions()[0].GetMessage()
 		}
 		log.Error(errors.New("Cluster in error/degraded state"), msg, "name", s.scope.ClusterName())
-		conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneErrorReason, clusterv1.ConditionSeverityError, "")
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneErrorReason, clusterv1beta1.ConditionSeverityError, "")
 		s.scope.GCPManagedControlPlane.Status.Ready = false
 		s.scope.GCPManagedControlPlane.Status.Initialized = false
 		return ctrl.Result{}, nil
@@ -149,12 +149,12 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) {
 			return ctrl.Result{}, err
 		}
 		log.Info("Cluster updating in progress")
-		conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneUpdatingCondition)
+		v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneUpdatingCondition)
 		s.scope.GCPManagedControlPlane.Status.Initialized = true
 		s.scope.GCPManagedControlPlane.Status.Ready = true
 		return ctrl.Result{}, nil
 	}
-	conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneUpdatingCondition, infrav1exp.GKEControlPlaneUpdatedReason, clusterv1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneUpdatingCondition, infrav1exp.GKEControlPlaneUpdatedReason, clusterv1beta1.ConditionSeverityInfo, "")
 
 	// Reconcile kubeconfig
 	err = s.reconcileKubeconfig(ctx, cluster, &log)
@@ -169,9 +169,9 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) {
 	}
 
 	s.scope.SetEndpoint(cluster.GetEndpoint())
-	conditions.MarkTrue(s.scope.ConditionSetter(), clusterv1.ReadyCondition)
-	conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition)
-	conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneCreatedReason, clusterv1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition)
+	v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition)
+	v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneCreatingCondition, infrav1exp.GKEControlPlaneCreatedReason, clusterv1beta1.ConditionSeverityInfo, "")
 	s.scope.GCPManagedControlPlane.Status.Ready = true
 	s.scope.GCPManagedControlPlane.Status.Initialized = true
@@ -191,7 +191,7 @@ func (s *Service) Delete(ctx context.Context) (ctrl.Result, error) {
 	}
 	if cluster == nil {
 		log.Info("Cluster already deleted")
-		conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition, infrav1exp.GKEControlPlaneDeletedReason, clusterv1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition, infrav1exp.GKEControlPlaneDeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
 		return ctrl.Result{}, nil
 	}
 
@@ -204,23 +204,23 @@ func (s *Service) Delete(ctx context.Context) (ctrl.Result, error) {
 		return ctrl.Result{}, nil
 	case containerpb.Cluster_STOPPING:
 		log.Info("Cluster stopping in progress")
-		conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1.ConditionSeverityInfo, "")
-		conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition)
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition)
 		return ctrl.Result{}, nil
 	default:
 		break
 	}
 
 	if err = s.deleteCluster(ctx, &log); err != nil {
-		conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, "deleting cluster: %v", err)
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition, infrav1exp.GKEControlPlaneReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "deleting cluster: %v", err)
 		return ctrl.Result{}, err
 	}
 	log.Info("Cluster deleting in progress")
 	s.scope.GCPManagedControlPlane.Status.Initialized = false
 	s.scope.GCPManagedControlPlane.Status.Ready = false
-	conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1.ConditionSeverityInfo, "")
-	conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1.ConditionSeverityInfo, "")
-	conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition)
+	v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneReadyCondition, infrav1exp.GKEControlPlaneDeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEControlPlaneDeletingCondition)
 	return ctrl.Result{}, nil
 }
diff --git a/cloud/services/container/nodepools/reconcile.go b/cloud/services/container/nodepools/reconcile.go
index cd24f91e2..31f8d485d 100644
--- a/cloud/services/container/nodepools/reconcile.go
+++ b/cloud/services/container/nodepools/reconcile.go
@@ -38,8 +38,8 @@ import (
 	"sigs.k8s.io/cluster-api-provider-gcp/cloud/services/shared"
 	infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-gcp/util/reconciler"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 )
@@ -47,7 +47,7 @@ import (
 // setReadyStatusFromConditions updates the GCPManagedMachinePool's ready status based on its conditions.
 func (s *Service) setReadyStatusFromConditions() {
 	machinePool := s.scope.GCPManagedMachinePool
-	if conditions.IsTrue(machinePool, clusterv1.ReadyCondition) || conditions.IsTrue(machinePool, infrav1exp.GKEMachinePoolUpdatingCondition) {
+	if v1beta1conditions.IsTrue(machinePool, clusterv1beta1.ReadyCondition) || v1beta1conditions.IsTrue(machinePool, infrav1exp.GKEMachinePoolUpdatingCondition) {
 		s.scope.GCPManagedMachinePool.Status.Ready = true
 		return
 	}
@@ -65,28 +65,28 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) {
 
 	nodePool, err := s.describeNodePool(ctx, &log)
 	if err != nil {
-		conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1.ConditionSeverityError, "reading node pool: %v", err)
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "reading node pool: %v", err)
 		return ctrl.Result{}, err
 	}
 	if nodePool == nil {
 		log.Info("Node pool not found, creating", "cluster", s.scope.Cluster.Name)
 		if err = s.createNodePool(ctx, &log); err != nil {
-			conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1.ConditionSeverityError, "creating node pool: %v", err)
-			conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1.ConditionSeverityError, "creating node pool: %v", err)
-			conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1.ConditionSeverityError, "creating node pool: %v", err)
+			v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "creating node pool: %v", err)
+			v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "creating node pool: %v", err)
+			v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "creating node pool: %v", err)
 			return ctrl.Result{}, err
 		}
 		log.Info("Node pool provisioning in progress")
-		conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEMachinePoolCreatingReason, clusterv1.ConditionSeverityInfo, "")
-		conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolCreatingReason, clusterv1.ConditionSeverityInfo, "")
-		conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition)
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEMachinePoolCreatingReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolCreatingReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition)
 		return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil
 	}
 	log.V(2).Info("Node pool found", "cluster", s.scope.Cluster.Name, "nodepool", nodePool.GetName())
 
 	instances, err := s.getInstances(ctx, nodePool)
 	if err != nil {
-		conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1.ConditionSeverityError, "reading instances: %v", err)
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "reading instances: %v", err)
 		return ctrl.Result{}, err
 	}
 	providerIDList := []string{}
@@ -95,7 +95,7 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) {
 		providerID, err := providerid.NewFromResourceURL(instance.GetInstance())
 		if err != nil {
 			log.Error(err, "parsing instance url", "url", instance.GetInstance())
-			conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolErrorReason, clusterv1.ConditionSeverityError, "")
+			v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolErrorReason, clusterv1beta1.ConditionSeverityError, "")
 			return ctrl.Result{}, err
 		}
 		providerIDList = append(providerIDList, providerID.String())
@@ -108,21 +108,21 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) {
 	case containerpb.NodePool_PROVISIONING:
 		// node pool is creating
 		log.Info("Node pool provisioning in progress")
-		conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEMachinePoolCreatingReason, clusterv1.ConditionSeverityInfo, "")
-		conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolCreatingReason, clusterv1.ConditionSeverityInfo, "")
-		conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition)
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEMachinePoolCreatingReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolCreatingReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition)
 		return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil
 	case containerpb.NodePool_RECONCILING:
 		// node pool is updating/reconciling
 		log.Info("Node pool reconciling in progress")
-		conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolUpdatingCondition)
+		v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolUpdatingCondition)
 		return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil
 	case containerpb.NodePool_STOPPING:
 		// node pool is deleting
 		log.Info("Node pool stopping in progress")
-		conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEMachinePoolDeletingReason, clusterv1.ConditionSeverityInfo, "")
-		conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolDeletingReason, clusterv1.ConditionSeverityInfo, "")
-		conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolDeletingCondition)
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEMachinePoolDeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolDeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolDeletingCondition)
 		return ctrl.Result{}, nil
 	case containerpb.NodePool_ERROR, containerpb.NodePool_RUNNING_WITH_ERROR:
 		// node pool is in error or degraded state
@@ -131,13 +131,13 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) {
 			msg = nodePool.GetConditions()[0].GetMessage()
 		}
 		log.Error(errors.New("Node pool in error/degraded state"), msg, "name", s.scope.GCPManagedMachinePool.Name)
-		conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolErrorReason, clusterv1.ConditionSeverityError, "")
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolErrorReason, clusterv1beta1.ConditionSeverityError, "")
 		return ctrl.Result{}, nil
 	case containerpb.NodePool_RUNNING:
 		// node pool is ready and running
-		conditions.MarkTrue(s.scope.ConditionSetter(), clusterv1.ReadyCondition)
-		conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition)
-		conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition, infrav1exp.GKEMachinePoolCreatedReason, clusterv1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition)
+		v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition)
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition, infrav1exp.GKEMachinePoolCreatedReason, clusterv1beta1.ConditionSeverityInfo, "")
 		log.Info("Node pool running")
 	default:
 		log.Error(errors.New("Unhandled node pool status"), fmt.Sprintf("Unhandled node pool status %s", nodePool.GetStatus()), "name", s.scope.GCPManagedMachinePool.Name)
@@ -153,7 +153,7 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) {
 		}
 		log.Info("Node pool config updating in progress")
 		s.scope.GCPManagedMachinePool.Status.Ready = true
-		conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolUpdatingCondition)
+		v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolUpdatingCondition)
 		return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil
 	}
@@ -165,7 +165,7 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) {
 			return ctrl.Result{}, err
 		}
 		log.Info("Node pool auto scaling updating in progress")
-		conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolUpdatingCondition)
+		v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolUpdatingCondition)
 		return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil
 	}
@@ -177,18 +177,18 @@ func (s *Service) Reconcile(ctx context.Context) (ctrl.Result, error) {
 			return ctrl.Result{}, err
 		}
 		log.Info("Node pool size updating in progress")
-		conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolUpdatingCondition)
+		v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolUpdatingCondition)
 		return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil
 	}
-	conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolUpdatingCondition, infrav1exp.GKEMachinePoolUpdatedReason, clusterv1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolUpdatingCondition, infrav1exp.GKEMachinePoolUpdatedReason, clusterv1beta1.ConditionSeverityInfo, "")
 	s.scope.SetReplicas(int32(len(s.scope.GCPManagedMachinePool.Spec.ProviderIDList)))
 	log.Info("Node pool reconciled")
 	s.scope.GCPManagedMachinePool.Status.Ready = true
-	conditions.MarkTrue(s.scope.ConditionSetter(), clusterv1.ReadyCondition)
-	conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition)
-	conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition, infrav1exp.GKEMachinePoolCreatedReason, clusterv1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition)
+	v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition)
+	v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolCreatingCondition, infrav1exp.GKEMachinePoolCreatedReason, clusterv1beta1.ConditionSeverityInfo, "")
 	return ctrl.Result{}, nil
 }
@@ -206,7 +206,7 @@ func (s *Service) Delete(ctx context.Context) (ctrl.Result, error) {
 	}
 	if nodePool == nil {
 		log.Info("Node pool already deleted")
-		conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolDeletingCondition, infrav1exp.GKEMachinePoolDeletedReason, clusterv1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolDeletingCondition, infrav1exp.GKEMachinePoolDeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
 		return ctrl.Result{}, err
 	}
 
@@ -219,21 +219,21 @@ func (s *Service) Delete(ctx context.Context) (ctrl.Result, error) {
 		return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil
 	case containerpb.NodePool_STOPPING:
 		log.Info("Node pool stopping in progress")
-		conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolDeletingReason, clusterv1.ConditionSeverityInfo, "")
-		conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolDeletingCondition)
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolDeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolDeletingCondition)
 		return ctrl.Result{RequeueAfter: reconciler.DefaultRetryTime}, nil
 	default:
 		break
 	}
 
 	if err = s.deleteNodePool(ctx); err != nil {
-		conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolDeletingCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1.ConditionSeverityError, "deleting node pool: %v", err)
+		v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolDeletingCondition, infrav1exp.GKEMachinePoolReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "deleting node pool: %v", err)
 		return ctrl.Result{}, err
 	}
 	log.Info("Node pool deleting in progress")
-	conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1.ReadyCondition, infrav1exp.GKEMachinePoolDeletingReason, clusterv1.ConditionSeverityInfo, "")
-	conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolDeletingReason, clusterv1.ConditionSeverityInfo, "")
-	conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolDeletingCondition)
+	v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), clusterv1beta1.ReadyCondition, infrav1exp.GKEMachinePoolDeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.GKEMachinePoolDeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkTrue(s.scope.ConditionSetter(), infrav1exp.GKEMachinePoolDeletingCondition)
 	return ctrl.Result{}, nil
 }
@@ -355,8 +355,8 @@ func (s *Service) checkDiffAndPrepareUpdateConfig(existingNodePool *containerpb.
 	desiredNodePool := scope.ConvertToSdkNodePool(*s.scope.GCPManagedMachinePool, *s.scope.MachinePool, isRegional, s.scope.GCPManagedControlPlane.Spec.ClusterName)
 
 	// Node version
-	if s.scope.NodePoolVersion() != nil {
-		desiredNodePoolVersion := infrav1exp.ConvertFromSdkNodeVersion(*s.scope.NodePoolVersion())
+	if s.scope.NodePoolVersion() != "" {
+		desiredNodePoolVersion := infrav1exp.ConvertFromSdkNodeVersion(s.scope.NodePoolVersion())
 		if desiredNodePoolVersion != infrav1exp.ConvertFromSdkNodeVersion(existingNodePool.GetVersion()) {
 			needUpdate = true
 			updateNodePoolRequest.NodeVersion = desiredNodePoolVersion
diff --git a/cloud/services/shared/machinepool.go b/cloud/services/shared/machinepool.go
index 33a7eeb96..9f4cac82b 100644
--- a/cloud/services/shared/machinepool.go
+++ b/cloud/services/shared/machinepool.go
@@ -21,14 +21,14 @@ import (
 	"fmt"
 	"strings"
 
-	clusterv1exp "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 
 	"sigs.k8s.io/cluster-api-provider-gcp/cloud"
 	infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1"
 )
 
 // ManagedMachinePoolPreflightCheck will perform checks against the machine pool before its created.
-func ManagedMachinePoolPreflightCheck(managedPool *infrav1exp.GCPManagedMachinePool, machinePool *clusterv1exp.MachinePool, location string) error {
+func ManagedMachinePoolPreflightCheck(managedPool *infrav1exp.GCPManagedMachinePool, machinePool *clusterv1.MachinePool, location string) error {
 	if machinePool.Spec.Template.Spec.InfrastructureRef.Name != managedPool.Name {
 		return fmt.Errorf("expect machinepool infraref (%s) to match managed machine pool name (%s)", machinePool.Spec.Template.Spec.InfrastructureRef.Name, managedPool.Name)
 	}
@@ -49,7 +49,7 @@ func ManagedMachinePoolPreflightCheck(managedPool *infrav1exp.GCPManagedMachineP
 }
 
 // ManagedMachinePoolsPreflightCheck will perform checks against a slice of machine pool before they are created.
-func ManagedMachinePoolsPreflightCheck(managedPools []infrav1exp.GCPManagedMachinePool, machinePools []clusterv1exp.MachinePool, location string) error {
+func ManagedMachinePoolsPreflightCheck(managedPools []infrav1exp.GCPManagedMachinePool, machinePools []clusterv1.MachinePool, location string) error {
 	if len(machinePools) != len(managedPools) {
 		return errors.New("each machinepool must have a matching gcpmanagedmachinepool")
 	}
diff --git a/controllers/gcpcluster_controller.go b/controllers/gcpcluster_controller.go
index 3d6e3962c..e84281a5c 100644
--- a/controllers/gcpcluster_controller.go
+++ b/controllers/gcpcluster_controller.go
@@ -31,9 +31,9 @@ import (
 	"sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute/loadbalancers"
 	"sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute/networks"
 	"sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute/subnets"
-	"sigs.k8s.io/cluster-api-provider-gcp/pkg/capiutils"
 	"sigs.k8s.io/cluster-api-provider-gcp/util/reconciler"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/predicates"
@@ -120,7 +120,7 @@ func (r *GCPClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 	}
 
 	// Fetch the Cluster.
-	cluster, err := capiutils.GetOwnerCluster(ctx, r.Client, gcpCluster.ObjectMeta)
+	cluster, err := util.GetOwnerCluster(ctx, r.Client, gcpCluster.ObjectMeta)
 	if err != nil {
 		log.Error(err, "Failed to get owner cluster")
 		return ctrl.Result{}, err
@@ -130,7 +130,7 @@ func (r *GCPClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 		return ctrl.Result{}, nil
 	}
 
-	if capiutils.IsPaused(cluster, gcpCluster) {
+	if annotations.IsPaused(cluster, gcpCluster) {
 		log.Info("GCPCluster of linked Cluster is marked as paused. Won't reconcile")
 		return ctrl.Result{}, nil
 	}
@@ -179,18 +179,18 @@ func (r *GCPClusterReconciler) reconcile(ctx context.Context, clusterScope *scop
 		return ctrl.Result{}, err
 	}
 
-	failureDomains := make(clusterv1.FailureDomains, len(zones))
+	failureDomains := make(clusterv1beta1.FailureDomains, len(zones))
 	for _, zone := range zones {
 		if len(clusterScope.GCPCluster.Spec.FailureDomains) > 0 {
 			for _, fd := range clusterScope.GCPCluster.Spec.FailureDomains {
 				if fd == zone.Name {
-					failureDomains[zone.Name] = clusterv1.FailureDomainSpec{
+					failureDomains[zone.Name] = clusterv1beta1.FailureDomainSpec{
 						ControlPlane: true,
 					}
 				}
 			}
 		} else {
-			failureDomains[zone.Name] = clusterv1.FailureDomainSpec{
+			failureDomains[zone.Name] = clusterv1beta1.FailureDomainSpec{
 				ControlPlane: true,
 			}
 		}
diff --git a/controllers/gcpmachine_controller.go b/controllers/gcpmachine_controller.go
index 87c4f582c..d08a45ad7 100644
--- a/controllers/gcpmachine_controller.go
+++ b/controllers/gcpmachine_controller.go
@@ -26,10 +26,10 @@ import (
 	infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-gcp/cloud/scope"
 	"sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute/instances"
-	"sigs.k8s.io/cluster-api-provider-gcp/pkg/capiutils"
 	"sigs.k8s.io/cluster-api-provider-gcp/util/reconciler"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/predicates"
 	"sigs.k8s.io/cluster-api/util/record"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -82,7 +82,7 @@ func (r *GCPMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma
 	if err := c.Watch(
 		source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{},
 			handler.EnqueueRequestsFromMapFunc(clusterToObjectFunc),
-			capiutils.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log),
+			predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), log),
 		)); err != nil {
 		return errors.Wrap(err, "failed adding a watch for ready clusters")
 	}
@@ -103,7 +103,7 @@ func (r *GCPMachineReconciler) GCPClusterToGCPMachines(ctx context.Context) hand
 			return nil
 		}
 
-		cluster, err := capiutils.GetOwnerCluster(mapCtx, r.Client, c.ObjectMeta)
+		cluster, err := util.GetOwnerCluster(mapCtx, r.Client, c.ObjectMeta)
 		switch {
 		case apierrors.IsNotFound(err) || cluster == nil:
 			return result
@@ -145,7 +145,7 @@ func (r *GCPMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 		return ctrl.Result{}, err
 	}
 
-	machine, err := capiutils.GetOwnerMachine(ctx, r.Client, gcpMachine.ObjectMeta)
+	machine, err := util.GetOwnerMachine(ctx, r.Client, gcpMachine.ObjectMeta)
 	if err != nil {
 		return ctrl.Result{}, err
 	}
@@ -155,14 +155,14 @@ func (r *GCPMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request)
 	}
 	log = log.WithValues("machine", machine.Name)
 
-	cluster, err := capiutils.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta)
+	cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta)
 	if err != nil {
 		log.Info("Machine is missing cluster label or cluster does not exist")
 		return ctrl.Result{}, nil
 	}
 
-	if capiutils.IsPaused(cluster, gcpMachine) {
+	if annotations.IsPaused(cluster, gcpMachine) {
 		log.Info("GCPMachine or linked Cluster is marked as paused.
Won't reconcile") return ctrl.Result{}, nil } diff --git a/controllers/gcpmachine_controller_unit_test.go b/controllers/gcpmachine_controller_unit_test.go index 055134660..840ea12ac 100644 --- a/controllers/gcpmachine_controller_unit_test.go +++ b/controllers/gcpmachine_controller_unit_test.go @@ -21,11 +21,10 @@ import ( "testing" . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) @@ -44,11 +43,10 @@ func newMachine(clusterName, machineName string) *clusterv1.Machine { func newMachineWithInfrastructureRef(clusterName, machineName string) *clusterv1.Machine { m := newMachine(clusterName, machineName) - m.Spec.InfrastructureRef = corev1.ObjectReference{ - Kind: "GCPMachine", - Namespace: "", - Name: "gcp" + machineName, - APIVersion: infrav1.GroupVersion.String(), + m.Spec.InfrastructureRef = clusterv1.ContractVersionedObjectReference{ + Kind: "GCPMachine", + Name: "gcp" + machineName, + APIGroup: infrav1.GroupVersion.Group, } return m diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 9b3c1bf31..7527e75d9 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/klog/v2" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" diff --git a/exp/api/v1beta1/conditions_consts.go b/exp/api/v1beta1/conditions_consts.go index b651dde77..bc806228b 100644 --- a/exp/api/v1beta1/conditions_consts.go +++ b/exp/api/v1beta1/conditions_consts.go @@ -16,17 +16,17 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +import clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" const ( // GKEControlPlaneReadyCondition condition reports on the successful reconciliation of GKE control plane. - GKEControlPlaneReadyCondition clusterv1.ConditionType = "GKEControlPlaneReady" + GKEControlPlaneReadyCondition clusterv1beta1.ConditionType = "GKEControlPlaneReady" // GKEControlPlaneCreatingCondition condition reports on whether the GKE control plane is creating. - GKEControlPlaneCreatingCondition clusterv1.ConditionType = "GKEControlPlaneCreating" + GKEControlPlaneCreatingCondition clusterv1beta1.ConditionType = "GKEControlPlaneCreating" // GKEControlPlaneUpdatingCondition condition reports on whether the GKE control plane is updating. - GKEControlPlaneUpdatingCondition clusterv1.ConditionType = "GKEControlPlaneUpdating" + GKEControlPlaneUpdatingCondition clusterv1beta1.ConditionType = "GKEControlPlaneUpdating" // GKEControlPlaneDeletingCondition condition reports on whether the GKE control plane is deleting. - GKEControlPlaneDeletingCondition clusterv1.ConditionType = "GKEControlPlaneDeleting" + GKEControlPlaneDeletingCondition clusterv1beta1.ConditionType = "GKEControlPlaneDeleting" // GKEControlPlaneCreatingReason used to report GKE control plane being created. 
GKEControlPlaneCreatingReason = "GKEControlPlaneCreating" @@ -46,13 +46,13 @@ const ( GKEControlPlaneRequiresAtLeastOneNodePoolReason = "GKEControlPlaneRequiresAtLeastOneNodePool" // GKEMachinePoolReadyCondition condition reports on the successful reconciliation of GKE node pool. - GKEMachinePoolReadyCondition clusterv1.ConditionType = "GKEMachinePoolReady" + GKEMachinePoolReadyCondition clusterv1beta1.ConditionType = "GKEMachinePoolReady" // GKEMachinePoolCreatingCondition condition reports on whether the GKE node pool is creating. - GKEMachinePoolCreatingCondition clusterv1.ConditionType = "GKEMachinePoolCreating" + GKEMachinePoolCreatingCondition clusterv1beta1.ConditionType = "GKEMachinePoolCreating" // GKEMachinePoolUpdatingCondition condition reports on whether the GKE node pool is updating. - GKEMachinePoolUpdatingCondition clusterv1.ConditionType = "GKEMachinePoolUpdating" + GKEMachinePoolUpdatingCondition clusterv1beta1.ConditionType = "GKEMachinePoolUpdating" // GKEMachinePoolDeletingCondition condition reports on whether the GKE node pool is deleting. - GKEMachinePoolDeletingCondition clusterv1.ConditionType = "GKEMachinePoolDeleting" + GKEMachinePoolDeletingCondition clusterv1beta1.ConditionType = "GKEMachinePoolDeleting" // WaitingForGKEControlPlaneReason used when the machine pool is waiting for GKE control plane infrastructure to be ready before proceeding. WaitingForGKEControlPlaneReason = "WaitingForGKEControlPlane" diff --git a/exp/api/v1beta1/gcpmanagedcluster_types.go b/exp/api/v1beta1/gcpmanagedcluster_types.go index 21d02cee0..038938447 100644 --- a/exp/api/v1beta1/gcpmanagedcluster_types.go +++ b/exp/api/v1beta1/gcpmanagedcluster_types.go @@ -19,7 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -38,7 +38,7 @@ type GCPManagedClusterSpec struct { // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. // +optional - ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` + ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` // NetworkSpec encapsulates all things related to the GCP network. // +optional @@ -72,11 +72,11 @@ type GCPManagedClusterSpec struct { // GCPManagedClusterStatus defines the observed state of GCPManagedCluster. 
type GCPManagedClusterStatus struct { - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` - Network infrav1.Network `json:"network,omitempty"` - Ready bool `json:"ready"` + FailureDomains clusterv1beta1.FailureDomains `json:"failureDomains,omitempty"` + Network infrav1.Network `json:"network,omitempty"` + Ready bool `json:"ready"` // Conditions specifies the conditions for the managed control plane - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true diff --git a/exp/api/v1beta1/gcpmanagedcontrolplane_types.go b/exp/api/v1beta1/gcpmanagedcontrolplane_types.go index 597697ff5..a800ca348 100644 --- a/exp/api/v1beta1/gcpmanagedcontrolplane_types.go +++ b/exp/api/v1beta1/gcpmanagedcontrolplane_types.go @@ -22,7 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/strings/slices" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -168,7 +168,7 @@ type GCPManagedControlPlaneSpec struct { // Endpoint represents the endpoint used to communicate with the control plane. // +optional - Endpoint clusterv1.APIEndpoint `json:"endpoint"` + Endpoint clusterv1beta1.APIEndpoint `json:"endpoint"` } // GCPManagedControlPlaneStatus defines the observed state of GCPManagedControlPlane. @@ -184,7 +184,7 @@ type GCPManagedControlPlaneStatus struct { Initialized bool `json:"initialized,omitempty"` // Conditions specifies the conditions for the managed control plane - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // CurrentVersion shows the current version of the GKE control plane. // @@ -301,12 +301,12 @@ func (m MonitoringService) String() string { } // GetConditions returns the control planes conditions. -func (r *GCPManagedControlPlane) GetConditions() clusterv1.Conditions { +func (r *GCPManagedControlPlane) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } // SetConditions sets the status conditions for the GCPManagedControlPlane. -func (r *GCPManagedControlPlane) SetConditions(conditions clusterv1.Conditions) { +func (r *GCPManagedControlPlane) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/exp/api/v1beta1/gcpmanagedmachinepool_types.go b/exp/api/v1beta1/gcpmanagedmachinepool_types.go index e7d11ab73..cbeda0fc5 100644 --- a/exp/api/v1beta1/gcpmanagedmachinepool_types.go +++ b/exp/api/v1beta1/gcpmanagedmachinepool_types.go @@ -18,7 +18,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -116,7 +116,7 @@ type GCPManagedMachinePoolStatus struct { // +optional Replicas int32 `json:"replicas"` // Conditions specifies the cpnditions for the managed machine pool - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // InfrastructureMachineKind is the kind of the infrastructure resources behind MachinePool Machines. // +optional InfrastructureMachineKind string `json:"infrastructureMachineKind,omitempty"` @@ -211,12 +211,12 @@ const ( ) // GetConditions returns the machine pool conditions. 
-func (r *GCPManagedMachinePool) GetConditions() clusterv1.Conditions { +func (r *GCPManagedMachinePool) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } // SetConditions sets the status conditions for the GCPManagedMachinePool. -func (r *GCPManagedMachinePool) SetConditions(conditions clusterv1.Conditions) { +func (r *GCPManagedMachinePool) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/exp/api/v1beta1/types_template.go b/exp/api/v1beta1/types_template.go index eec236398..1e7b46ea1 100644 --- a/exp/api/v1beta1/types_template.go +++ b/exp/api/v1beta1/types_template.go @@ -18,7 +18,7 @@ package v1beta1 import ( infrav1 "sigs.k8s.io/cluster-api-provider-gcp/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // GCPManagedControlPlaneTemplateResourceSpec specifies an GCP managed control plane template resource. @@ -45,7 +45,7 @@ type GCPManagedClusterTemplateResourceSpec struct { // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. // +optional - ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` + ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` // NetworkSpec encapsulates all things related to the GCP network. // +optional diff --git a/exp/bootstrap/gke/api/v1beta1/gkeconfig_types.go b/exp/bootstrap/gke/api/v1beta1/gkeconfig_types.go index b1b32c592..cbe7a417c 100644 --- a/exp/bootstrap/gke/api/v1beta1/gkeconfig_types.go +++ b/exp/bootstrap/gke/api/v1beta1/gkeconfig_types.go @@ -18,7 +18,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // GKEConfigSpec defines the desired state of GCP GKE Bootstrap Configuration. @@ -64,7 +64,7 @@ type GKEConfigStatus struct { // Conditions defines current service state of the GKEConfig. 
// +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true diff --git a/exp/bootstrap/gke/controllers/gkeconfig_controller.go b/exp/bootstrap/gke/controllers/gkeconfig_controller.go index c92a6f500..1bb1f8eb5 100644 --- a/exp/bootstrap/gke/controllers/gkeconfig_controller.go +++ b/exp/bootstrap/gke/controllers/gkeconfig_controller.go @@ -34,7 +34,7 @@ import ( infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" bootstrapv1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/bootstrap/gke/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -97,7 +97,7 @@ func (r *GKEConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( gcpMP := &infrav1exp.GCPManagedMachinePool{} gcpMPKey := types.NamespacedName{ Name: machinePool.Spec.Template.Spec.InfrastructureRef.Name, - Namespace: machinePool.Spec.Template.Spec.InfrastructureRef.Namespace, + Namespace: machinePool.Namespace, } if err := r.Get(ctx, gcpMPKey, gcpMP); err != nil { if apierrors.IsNotFound(err) { @@ -148,13 +148,13 @@ func (r *GKEConfigReconciler) ManagedMachinePoolToGKEConfigMapFunc(_ context.Con { NamespacedName: client.ObjectKey{ Name: machinePool.Spec.Template.Spec.InfrastructureRef.Name, - Namespace: machinePool.Spec.Template.Spec.InfrastructureRef.Namespace, + Namespace: machinePool.Namespace, }, }, } } -func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*expclusterv1.MachinePool, error) { +func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.MachinePool, error) { for _, ref := range obj.OwnerReferences { if ref.Kind != "MachinePool" { continue @@ -163,7 +163,7 @@ func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.Object if err != nil { return nil, errors.WithStack(err) } - if gv.Group == expclusterv1.GroupVersion.Group { + if gv.Group == clusterv1.GroupVersion.Group { return getMachinePoolByName(ctx, c, obj.Namespace, ref.Name) } } @@ -171,8 +171,8 @@ func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.Object return nil, nil } -func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*expclusterv1.MachinePool, error) { - m := &expclusterv1.MachinePool{} +func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.MachinePool, error) { + m := &clusterv1.MachinePool{} key := client.ObjectKey{Name: name, Namespace: namespace} if err := c.Get(ctx, key, m); err != nil { return nil, err diff --git a/exp/controllers/gcpmanagedcluster_controller.go b/exp/controllers/gcpmanagedcluster_controller.go index 98085dd3e..8096b9fbb 100644 --- a/exp/controllers/gcpmanagedcluster_controller.go +++ b/exp/controllers/gcpmanagedcluster_controller.go @@ -32,10 +32,11 @@ import ( "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute/networks" "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/compute/subnets" infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-gcp/pkg/capiutils" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" + 
"sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/predicates" "sigs.k8s.io/cluster-api/util/record" ctrl "sigs.k8s.io/controller-runtime" @@ -81,7 +82,7 @@ func (r *GCPManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Re } // Fetch the Cluster. - cluster, err := capiutils.GetOwnerCluster(ctx, r.Client, gcpCluster.ObjectMeta) + cluster, err := util.GetOwnerCluster(ctx, r.Client, gcpCluster.ObjectMeta) if err != nil { log.Error(err, "Failed to get owner cluster") return ctrl.Result{}, err @@ -91,7 +92,7 @@ func (r *GCPManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, nil } - if capiutils.IsPaused(cluster, gcpCluster) { + if annotations.IsPaused(cluster, gcpCluster) { log.Info("GCPManagedCluster or linked Cluster is marked as paused. Won't reconcile") return ctrl.Result{}, nil } @@ -101,7 +102,7 @@ func (r *GCPManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Re controlPlane := &infrav1exp.GCPManagedControlPlane{} controlPlaneRef := types.NamespacedName{ Name: cluster.Spec.ControlPlaneRef.Name, - Namespace: cluster.Spec.ControlPlaneRef.Namespace, + Namespace: cluster.Namespace, } log.V(4).Info("getting control plane ", "ref", controlPlaneRef) @@ -186,9 +187,9 @@ func (r *GCPManagedClusterReconciler) reconcile(ctx context.Context, clusterScop return err } - failureDomains := make(clusterv1.FailureDomains, len(zones)) + failureDomains := make(clusterv1beta1.FailureDomains, len(zones)) for _, zone := range zones { - failureDomains[zone.Name] = clusterv1.FailureDomainSpec{ + failureDomains[zone.Name] = clusterv1beta1.FailureDomainSpec{ ControlPlane: false, } } @@ -212,7 +213,10 @@ func (r *GCPManagedClusterReconciler) reconcile(ctx context.Context, clusterScop record.Event(clusterScope.GCPManagedCluster, "GCPManagedClusterReconcile", "Ready") controlPlaneEndpoint := clusterScope.GCPManagedControlPlane.Spec.Endpoint - clusterScope.SetControlPlaneEndpoint(controlPlaneEndpoint) + clusterScope.SetControlPlaneEndpoint(clusterv1.APIEndpoint{ + Host: controlPlaneEndpoint.Host, + Port: controlPlaneEndpoint.Port, + }) if controlPlaneEndpoint.IsZero() { log.Info("GCPManagedControlplane does not have endpoint yet. 
Reconciling") @@ -280,7 +284,7 @@ func (r *GCPManagedClusterReconciler) managedControlPlaneMapper() handler.MapFun return nil } - cluster, err := capiutils.GetOwnerCluster(ctx, r.Client, gcpManagedControlPlane.ObjectMeta) + cluster, err := util.GetOwnerCluster(ctx, r.Client, gcpManagedControlPlane.ObjectMeta) if err != nil { log.Error(err, "failed to get owning cluster") return nil @@ -291,7 +295,7 @@ func (r *GCPManagedClusterReconciler) managedControlPlaneMapper() handler.MapFun } managedClusterRef := cluster.Spec.InfrastructureRef - if managedClusterRef == nil || managedClusterRef.Kind != "GCPManagedCluster" { + if !managedClusterRef.IsDefined() || managedClusterRef.Kind != "GCPManagedCluster" { log.Info("InfrastructureRef is nil or not GCPManagedCluster, skipping mapping") return nil } @@ -300,7 +304,7 @@ func (r *GCPManagedClusterReconciler) managedControlPlaneMapper() handler.MapFun { NamespacedName: types.NamespacedName{ Name: managedClusterRef.Name, - Namespace: managedClusterRef.Namespace, + Namespace: cluster.Namespace, }, }, } diff --git a/exp/controllers/gcpmanagedcontrolplane_controller.go b/exp/controllers/gcpmanagedcontrolplane_controller.go index 9887fcf21..5fd6b68c7 100644 --- a/exp/controllers/gcpmanagedcontrolplane_controller.go +++ b/exp/controllers/gcpmanagedcontrolplane_controller.go @@ -27,11 +27,11 @@ import ( "sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/container/clusters" infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" - "sigs.k8s.io/cluster-api-provider-gcp/pkg/capiutils" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + "sigs.k8s.io/cluster-api/util/annotations" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/predicates" "sigs.k8s.io/cluster-api/util/record" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -75,7 +75,7 @@ func (r *GCPManagedControlPlaneReconciler) SetupWithManager(ctx context.Context, if err = c.Watch( source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, gcpManagedControlPlane.GroupVersionKind(), mgr.GetClient(), &infrav1exp.GCPManagedControlPlane{})), - capiutils.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), log), )); err != nil { return fmt.Errorf("failed adding a watch for ready clusters: %w", err) } @@ -99,7 +99,7 @@ func (r *GCPManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ct } // Get the cluster - cluster, err := capiutils.GetOwnerCluster(ctx, r.Client, gcpManagedControlPlane.ObjectMeta) + cluster, err := util.GetOwnerCluster(ctx, r.Client, gcpManagedControlPlane.ObjectMeta) if err != nil { log.Error(err, "Failed to retrieve owner Cluster from the API Server") return ctrl.Result{}, err @@ -109,7 +109,7 @@ func (r *GCPManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ct return ctrl.Result{}, nil } - if capiutils.IsPaused(cluster, gcpManagedControlPlane) { + if annotations.IsPaused(cluster, gcpManagedControlPlane) { log.Info("Reconciliation is paused for this object") return ctrl.Result{}, nil } @@ -207,7 +207,7 @@ func (r *GCPManagedControlPlaneReconciler) 
reconcileDelete(ctx context.Context, } if managedControlPlaneScope.GCPManagedControlPlane != nil && - conditions.Get(managedControlPlaneScope.GCPManagedControlPlane, infrav1exp.GKEControlPlaneDeletingCondition).Reason == infrav1exp.GKEControlPlaneDeletedReason { + v1beta1conditions.Get(managedControlPlaneScope.GCPManagedControlPlane, infrav1exp.GKEControlPlaneDeletingCondition).Reason == infrav1exp.GKEControlPlaneDeletedReason { controllerutil.RemoveFinalizer(managedControlPlaneScope.GCPManagedControlPlane, infrav1exp.ManagedControlPlaneFinalizer) } diff --git a/exp/controllers/gcpmanagedmachinepool_controller.go b/exp/controllers/gcpmanagedmachinepool_controller.go index 99535936b..4f81f85c9 100644 --- a/exp/controllers/gcpmanagedmachinepool_controller.go +++ b/exp/controllers/gcpmanagedmachinepool_controller.go @@ -30,8 +30,8 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/cluster-api-provider-gcp/cloud" "sigs.k8s.io/cluster-api-provider-gcp/cloud/services/container/nodepools" - "sigs.k8s.io/cluster-api-provider-gcp/pkg/capiutils" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + "sigs.k8s.io/cluster-api/util/annotations" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/record" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -40,7 +40,8 @@ import ( "sigs.k8s.io/cluster-api-provider-gcp/cloud/scope" infrav1exp "sigs.k8s.io/cluster-api-provider-gcp/exp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/predicates" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -88,7 +89,7 @@ func machinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind) handler.Map gk := gvk.GroupKind() // Return early if the GroupKind doesn't match what we expect - infraGK := m.Spec.Template.Spec.InfrastructureRef.GroupVersionKind().GroupKind() + infraGK := m.Spec.Template.Spec.InfrastructureRef.GroupKind() if gk != infraGK { log.FromContext(ctx).Info("gk does not match", "gk", gk, "infraGK", infraGK) return nil @@ -180,7 +181,7 @@ func (r *GCPManagedMachinePoolReconciler) SetupWithManager(ctx context.Context, if err := c.Watch( source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(clusterToObjectFunc), - capiutils.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), log), )); err != nil { return errors.Wrap(err, "failed adding a watch for ready clusters") } @@ -251,12 +252,12 @@ func (r *GCPManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctr } // Get the cluster - cluster, err := capiutils.GetClusterFromMetadata(ctx, r.Client, machinePool.ObjectMeta) + cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machinePool.ObjectMeta) if err != nil { log.Info("Failed to retrieve Cluster from MachinePool") return ctrl.Result{}, err } - if capiutils.IsPaused(cluster, gcpManagedMachinePool) { + if annotations.IsPaused(cluster, gcpManagedMachinePool) { log.Info("Reconciliation is paused for this object") return ctrl.Result{}, nil } @@ -286,7 +287,7 @@ func (r *GCPManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctr if 
!gcpManagedControlPlane.Status.Ready { log.Info("Control plane is not ready yet") - conditions.MarkFalse(gcpManagedMachinePool, infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.WaitingForGKEControlPlaneReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(gcpManagedMachinePool, infrav1exp.GKEMachinePoolReadyCondition, infrav1exp.WaitingForGKEControlPlaneReason, clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } @@ -387,7 +388,7 @@ func (r *GCPManagedMachinePoolReconciler) reconcileDelete(ctx context.Context, m } } - if conditions.Get(managedMachinePoolScope.GCPManagedMachinePool, infrav1exp.GKEMachinePoolDeletingCondition).Reason == infrav1exp.GKEMachinePoolDeletedReason { + if v1beta1conditions.Get(managedMachinePoolScope.GCPManagedMachinePool, infrav1exp.GKEMachinePoolDeletingCondition).Reason == infrav1exp.GKEMachinePoolDeletedReason { controllerutil.RemoveFinalizer(managedMachinePoolScope.GCPManagedMachinePool, infrav1exp.ManagedMachinePoolFinalizer) } diff --git a/main.go b/main.go index 37d782d65..dc85a557f 100644 --- a/main.go +++ b/main.go @@ -41,7 +41,7 @@ import ( "sigs.k8s.io/cluster-api-provider-gcp/feature" "sigs.k8s.io/cluster-api-provider-gcp/util/reconciler" "sigs.k8s.io/cluster-api-provider-gcp/version" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" capifeature "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util/flags" "sigs.k8s.io/cluster-api/util/record" diff --git a/pkg/capiutils/predicates.go b/pkg/capiutils/predicates.go deleted file mode 100644 index 608fb34aa..000000000 --- a/pkg/capiutils/predicates.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2025 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package capiutils contains utility functions for working with Cluster API resources. -// These have mostly been inlined as part of the CAPI 1.10 -> 1.11 upgrade, -// and should be removed when we switch to reading CAPI v1beta2 objects. -// -// Deprecated: This package is deprecated and is going to be removed when support for v1beta1 will be dropped. -package capiutils - -import ( - "fmt" - - "github.com/go-logr/logr" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/klog/v2" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/predicate" - - "sigs.k8s.io/cluster-api/util/predicates" - - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" -) - -// ClusterUpdateInfraReady returns a predicate that returns true for an update event when a cluster has Status.InfrastructureReady changed from false to true -// it also returns true if the resource provided is not a Cluster to allow for use with controller-runtime NewControllerManagedBy. 
-func ClusterUpdateInfraReady(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { - return predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - log := logger.WithValues("predicate", "ClusterUpdateInfraReady", "eventType", "update") - if gvk, err := apiutil.GVKForObject(e.ObjectOld, scheme); err == nil { - log = log.WithValues(gvk.Kind, klog.KObj(e.ObjectOld)) - } - - oldCluster, ok := e.ObjectOld.(*clusterv1.Cluster) - if !ok { - log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) - return false - } - - newCluster := e.ObjectNew.(*clusterv1.Cluster) - - if !oldCluster.Status.InfrastructureReady && newCluster.Status.InfrastructureReady { - log.V(6).Info("Cluster infrastructure became ready, allowing further processing") - return true - } - - log.V(4).Info("Cluster infrastructure did not become ready, blocking further processing") - return false - }, - CreateFunc: func(event.CreateEvent) bool { return false }, - DeleteFunc: func(event.DeleteEvent) bool { return false }, - GenericFunc: func(event.GenericEvent) bool { return false }, - } -} - -// ClusterPausedTransitions returns a predicate that returns true for an update event when a cluster has Spec.Paused changed. -func ClusterPausedTransitions(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { - return predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - log := logger.WithValues("predicate", "ClusterPausedTransitions", "eventType", "update") - if gvk, err := apiutil.GVKForObject(e.ObjectOld, scheme); err == nil { - log = log.WithValues(gvk.Kind, klog.KObj(e.ObjectOld)) - } - - oldCluster, ok := e.ObjectOld.(*clusterv1.Cluster) - if !ok { - log.V(4).Info("Expected Cluster", "type", fmt.Sprintf("%T", e.ObjectOld)) - return false - } - - newCluster := e.ObjectNew.(*clusterv1.Cluster) - - if oldCluster.Spec.Paused && !newCluster.Spec.Paused { - log.V(6).Info("Cluster unpausing, allowing further processing") - return true - } - - if !oldCluster.Spec.Paused && newCluster.Spec.Paused { - log.V(6).Info("Cluster pausing, allowing further processing") - return true - } - - // This predicate always work in "or" with Paused predicates - // so the logs are adjusted to not provide false negatives/verbosity at V<=5. - log.V(6).Info("Cluster paused state was not changed, blocking further processing") - return false - }, - CreateFunc: func(event.CreateEvent) bool { return false }, - DeleteFunc: func(event.DeleteEvent) bool { return false }, - GenericFunc: func(event.GenericEvent) bool { return false }, - } -} - -// ClusterPausedTransitionsOrInfrastructureReady returns a Predicate that returns true on Cluster Update events where -// either Cluster.Spec.Paused transitions or Cluster.Status.InfrastructureReady transitions to true. -// This implements a common requirement for some cluster-api and provider controllers (such as Machine Infrastructure -// controllers) to resume reconciliation when the Cluster gets paused or unpaused and when the infrastructure becomes ready. 
-// Example use: -// -// err := controller.Watch( -// source.Kind(cache, &clusterv1.Cluster{}), -// handler.EnqueueRequestsFromMapFunc(clusterToMachines) -// predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), r.Log), -// ) -func ClusterPausedTransitionsOrInfrastructureReady(scheme *runtime.Scheme, logger logr.Logger) predicate.Funcs { - log := logger.WithValues("predicate", "ClusterPausedTransitionsOrInfrastructureReady") - - return predicates.Any(scheme, log, ClusterPausedTransitions(scheme, log), ClusterUpdateInfraReady(scheme, log)) -} diff --git a/pkg/capiutils/utils.go b/pkg/capiutils/utils.go deleted file mode 100644 index f1905451a..000000000 --- a/pkg/capiutils/utils.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2025 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package capiutils - -import ( - "context" - - "github.com/pkg/errors" - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/client" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - capiv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/annotations" -) - -// IsControlPlaneMachine checks machine is a control plane node. -func IsControlPlaneMachine(machine *capiv1beta1.Machine) bool { - _, ok := machine.Labels[capiv1beta1.MachineControlPlaneLabel] - return ok -} - -// GetOwnerCluster returns the Cluster object owning the current resource. -func GetOwnerCluster(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*capiv1beta1.Cluster, error) { - for _, ref := range obj.GetOwnerReferences() { - if ref.Kind != "Cluster" { - continue - } - gv, err := schema.ParseGroupVersion(ref.APIVersion) - if err != nil { - return nil, errors.WithStack(err) - } - if gv.Group == capiv1beta1.GroupVersion.Group { - return GetClusterByName(ctx, c, obj.Namespace, ref.Name) - } - } - return nil, nil -} - -// GetClusterFromMetadata returns the Cluster object (if present) using the object metadata. -func GetClusterFromMetadata(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*capiv1beta1.Cluster, error) { - if obj.Labels[capiv1beta1.ClusterNameLabel] == "" { - return nil, errors.WithStack(util.ErrNoCluster) - } - return GetClusterByName(ctx, c, obj.Namespace, obj.Labels[capiv1beta1.ClusterNameLabel]) -} - -// GetClusterByName finds and return a Cluster object using the specified params. -func GetClusterByName(ctx context.Context, c client.Client, namespace, name string) (*capiv1beta1.Cluster, error) { - cluster := &capiv1beta1.Cluster{} - key := client.ObjectKey{ - Namespace: namespace, - Name: name, - } - - if err := c.Get(ctx, key, cluster); err != nil { - return nil, errors.Wrapf(err, "failed to get Cluster/%s", name) - } - - return cluster, nil -} - -// IsPaused returns true if the Cluster is paused or the object has the `paused` annotation. 
-func IsPaused(cluster *capiv1beta1.Cluster, o metav1.Object) bool { - if cluster.Spec.Paused { - return true - } - return annotations.HasPaused(o) -} - -// GetOwnerMachine returns the Machine object owning the current resource. -func GetOwnerMachine(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*capiv1beta1.Machine, error) { - for _, ref := range obj.GetOwnerReferences() { - gv, err := schema.ParseGroupVersion(ref.APIVersion) - if err != nil { - return nil, err - } - if ref.Kind == "Machine" && gv.Group == capiv1beta1.GroupVersion.Group { - return GetMachineByName(ctx, c, obj.Namespace, ref.Name) - } - } - return nil, nil -} - -// GetMachineByName finds and return a Machine object using the specified params. -func GetMachineByName(ctx context.Context, c client.Client, namespace, name string) (*capiv1beta1.Machine, error) { - m := &capiv1beta1.Machine{} - key := client.ObjectKey{Name: name, Namespace: namespace} - if err := c.Get(ctx, key, m); err != nil { - return nil, err - } - return m, nil -} diff --git a/test/e2e/data/infrastructure-gcp/cluster-template-upgrades.yaml b/test/e2e/data/infrastructure-gcp/cluster-template-upgrades.yaml index abaf5dfe5..63943fd87 100644 --- a/test/e2e/data/infrastructure-gcp/cluster-template-upgrades.yaml +++ b/test/e2e/data/infrastructure-gcp/cluster-template-upgrades.yaml @@ -154,3 +154,22 @@ spec: spec: instanceType: "${GCP_NODE_MACHINE_TYPE}" image: "${KUBERNETES_IMAGE_UPGRADE_TO}" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: "${CLUSTER_NAME}-crs-ccm" +data: ${CCM_RESOURCES} +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: "${CLUSTER_NAME}-crs-ccm" +spec: + strategy: ApplyOnce + clusterSelector: + matchLabels: + ccm: "${CLUSTER_NAME}-crs-ccm" + resources: + - name: "${CLUSTER_NAME}-crs-ccm" + kind: ConfigMap
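
Note on two conversion patterns that recur in the hunks above: the v1beta2 ContractVersionedObjectReference used for InfrastructureRef carries no Namespace field, so the controllers now key lookups off the owning object's namespace (as in the gkeconfig and gcpmanagedcluster controller changes), and the v1beta1 provider API types still store a clusterv1beta1.APIEndpoint while the scope methods speak the v1beta2 type, so endpoints are copied field by field. The sketch below is illustrative only and is not part of the patch; the package and helper names (conversionsketch, infraRefKey, toV1Beta2Endpoint) are made up here to show the shared pattern under those assumptions.

package conversionsketch // illustrative sketch only, not part of this patch

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
)

// infraRefKey builds the client lookup key for an object referenced by a v1beta2
// ContractVersionedObjectReference. The reference itself has no Namespace field,
// so the owning object's namespace is used, matching the controller changes above.
func infraRefKey(owner metav1.Object, ref clusterv1.ContractVersionedObjectReference) types.NamespacedName {
	return types.NamespacedName{
		Name:      ref.Name,
		Namespace: owner.GetNamespace(),
	}
}

// toV1Beta2Endpoint mirrors the inline conversion at the SetControlPlaneEndpoint
// call site in the managed cluster controller: the spec keeps the v1beta1 endpoint
// type, so Host and Port are copied across before handing it to v1beta2 consumers.
func toV1Beta2Endpoint(in clusterv1beta1.APIEndpoint) clusterv1.APIEndpoint {
	return clusterv1.APIEndpoint{
		Host: in.Host,
		Port: in.Port,
	}
}

Both helpers simply pull the inline pattern from the hunks above into one place; the patch itself keeps these conversions inline at each call site.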