diff --git a/cmd/operator-controller/main.go b/cmd/operator-controller/main.go
index cacff784f..338948b4a 100644
--- a/cmd/operator-controller/main.go
+++ b/cmd/operator-controller/main.go
@@ -586,6 +586,7 @@ func setupBoxcutter(
 	ceReconciler.RevisionStatesGetter = &controllers.BoxcutterRevisionStatesGetter{Reader: mgr.GetClient()}
 	ceReconciler.StorageMigrator = &applier.BoxcutterStorageMigrator{
 		Client:             mgr.GetClient(),
+		Scheme:             mgr.GetScheme(),
 		ActionClientGetter: acg,
 		RevisionGenerator:  rg,
 	}
diff --git a/internal/operator-controller/applier/boxcutter.go b/internal/operator-controller/applier/boxcutter.go
index 036ba07ea..3c663b93f 100644
--- a/internal/operator-controller/applier/boxcutter.go
+++ b/internal/operator-controller/applier/boxcutter.go
@@ -84,6 +84,9 @@ func (r *SimpleRevisionGenerator) GenerateRevisionFromHelmRelease(
 	})
 	rev.Name = fmt.Sprintf("%s-1", ext.Name)
 	rev.Spec.Revision = 1
+	// Explicitly set LifecycleState to Active to ensure the migrated revision is recognized as active
+	// even before the CRD default is applied by the API server.
+	rev.Spec.LifecycleState = ocv1.ClusterExtensionRevisionLifecycleStateActive
 	return rev, nil
 }
 
@@ -140,12 +143,18 @@ func (r *SimpleRevisionGenerator) buildClusterExtensionRevision(
 	ext *ocv1.ClusterExtension,
 	annotations map[string]string,
 ) *ocv1.ClusterExtensionRevision {
+	revisionLabels := map[string]string{
+		controllers.ClusterExtensionRevisionOwnerLabel: ext.Name,
+	}
+	// The package name must be in the labels so that BoxcutterRevisionStatesGetter can find it.
+	if pkg, ok := annotations[labels.PackageNameKey]; ok {
+		revisionLabels[labels.PackageNameKey] = pkg
+	}
+
 	return &ocv1.ClusterExtensionRevision{
 		ObjectMeta: metav1.ObjectMeta{
 			Annotations: annotations,
-			Labels: map[string]string{
-				controllers.ClusterExtensionRevisionOwnerLabel: ext.Name,
-			},
+			Labels:      revisionLabels,
 		},
 		Spec: ocv1.ClusterExtensionRevisionSpec{
 			Phases: PhaseSort(objects),
@@ -153,17 +162,38 @@ func (r *SimpleRevisionGenerator) buildClusterExtensionRevision(
 	}
 }
 
+// BoxcutterStorageMigrator migrates ClusterExtensions from Helm-based storage to Boxcutter's
+// ClusterExtensionRevision storage.
+//
+// Scenario: Given a ClusterExtension, determine if migration from Helm storage is needed
+// - When ClusterExtensionRevisions already exist, no migration is needed (return early)
+// - When a Helm release exists but no revisions, create a ClusterExtensionRevision from the Helm release
+// - When no Helm release exists and no revisions, this is a fresh install (no migration needed)
+//
+// The migrated revision includes:
+// - Owner label (olm.operatorframework.io/owner) for label-based lookups
+// - OwnerReference to the parent ClusterExtension for proper garbage collection
+// - All objects from the Helm release manifest
+//
+// This ensures migrated revisions have the same structure as revisions created by the normal
+// Boxcutter flow, enabling successful upgrades after migration.
 type BoxcutterStorageMigrator struct {
 	ActionClientGetter helmclient.ActionClientGetter
 	RevisionGenerator  ClusterExtensionRevisionGenerator
 	Client             boxcutterStorageMigratorClient
+	Scheme             *runtime.Scheme // Required for setting ownerReferences on migrated revisions
}
 
 type boxcutterStorageMigratorClient interface {
 	List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error
 	Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error
+	Status() client.StatusWriter
 }
 
+// Migrate performs a one-time migration from Helm storage to ClusterExtensionRevision storage.
+//
+// This enables upgrades from older operator-controller versions that used Helm for state management.
+// The migration is idempotent: it checks for existing revisions before attempting to migrate.
 func (m *BoxcutterStorageMigrator) Migrate(ctx context.Context, ext *ocv1.ClusterExtension, objectLabels map[string]string) error {
 	existingRevisionList := ocv1.ClusterExtensionRevisionList{}
 	if err := m.Client.List(ctx, &existingRevisionList, client.MatchingLabels{
@@ -195,9 +225,39 @@ func (m *BoxcutterStorageMigrator) Migrate(ctx context.Context, ext *ocv1.Cluste
 		return err
 	}
 
+	// Set an ownerReference to ensure proper lifecycle management and garbage collection.
+	// This makes the migrated revision consistent with revisions created by the normal Boxcutter flow.
+	if err := controllerutil.SetControllerReference(ext, rev, m.Scheme); err != nil {
+		return fmt.Errorf("set ownerref: %w", err)
+	}
+
 	if err := m.Client.Create(ctx, rev); err != nil {
 		return err
 	}
+
+	// Mark the migrated revision as succeeded since the Helm release was already deployed.
+	// Without Succeeded=True, BoxcutterRevisionStatesGetter treats it as RollingOut instead of Installed,
+	// which breaks resolution and prevents upgrades.
+	// Note: after Create(), rev.Generation is populated by the API server (typically 1 for new objects).
+	meta.SetStatusCondition(&rev.Status.Conditions, metav1.Condition{
+		Type:               ocv1.ClusterExtensionRevisionTypeAvailable,
+		Status:             metav1.ConditionTrue,
+		Reason:             ocv1.ClusterExtensionRevisionReasonAvailable,
+		Message:            "Migrated from Helm storage",
+		ObservedGeneration: rev.Generation,
+	})
+	meta.SetStatusCondition(&rev.Status.Conditions, metav1.Condition{
+		Type:               ocv1.ClusterExtensionRevisionTypeSucceeded,
+		Status:             metav1.ConditionTrue,
+		Reason:             ocv1.ClusterExtensionRevisionReasonRolloutSuccess,
+		Message:            "Migrated from Helm storage",
+		ObservedGeneration: rev.Generation,
+	})
+
+	if err := m.Client.Status().Update(ctx, rev); err != nil {
+		return fmt.Errorf("updating migrated revision status: %w", err)
+	}
+
 	return nil
 }
 
@@ -270,6 +330,12 @@ func (bc *Boxcutter) apply(ctx context.Context, contentFS fs.FS, ext *ocv1.Clust
 	case err == nil:
 		// inplace patch was successful, no changes in phases
 		state = StateUnchanged
+		// Re-fetch the revision to get updated metadata and status after the patch
+		updated := &ocv1.ClusterExtensionRevision{}
+		if err := bc.Client.Get(ctx, client.ObjectKey{Name: currentRevision.Name}, updated); err != nil {
+			return false, "", fmt.Errorf("fetching updated revision: %w", err)
+		}
+		currentRevision = updated
 	default:
 		return false, "", fmt.Errorf("patching %s Revision: %w", desiredRevision.Name, err)
 	}
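Reading aid, not part of the patch: the Migrate doc comment above describes a three-way decision, and the self-contained Go sketch below restates just that decision table. All names here (decideMigration, migrationAction, the action constants) are hypothetical and do not exist in the repository.

package main

import "fmt"

// migrationAction is a hypothetical label for the three outcomes described in the doc comment.
type migrationAction string

const (
	actionSkipAlreadyMigrated migrationAction = "skip: ClusterExtensionRevisions already exist"
	actionMigrateFromHelm     migrationAction = "migrate: build a revision from the Helm release"
	actionSkipFreshInstall    migrationAction = "skip: fresh install, nothing to migrate"
)

// decideMigration mirrors the early-return order of Migrate: existing revisions win,
// then a Helm release triggers migration, otherwise there is nothing to do.
func decideMigration(hasRevisions, hasHelmRelease bool) migrationAction {
	switch {
	case hasRevisions:
		return actionSkipAlreadyMigrated
	case hasHelmRelease:
		return actionMigrateFromHelm
	default:
		return actionSkipFreshInstall
	}
}

func main() {
	fmt.Println(decideMigration(false, true)) // migrate: build a revision from the Helm release
}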
"olm.operatorframework.io/package-name": "my-package", }, }, Spec: ocv1.ClusterExtensionRevisionSpec{ - Revision: 1, + LifecycleState: ocv1.ClusterExtensionRevisionLifecycleStateActive, + Revision: 1, Phases: []ocv1.ClusterExtensionRevisionPhase{ { Name: "deploy", @@ -758,6 +763,99 @@ func TestBoxcutter_Apply(t *testing.T) { assert.Contains(t, latest.Spec.Previous, ocv1.ClusterExtensionRevisionPrevious{Name: "rev-4"}) }, }, + { + name: "annotation-only update (same phases, different annotations)", + mockBuilder: &mockBundleRevisionBuilder{ + makeRevisionFunc: func(bundleFS fs.FS, ext *ocv1.ClusterExtension, objectLabels, revisionAnnotations map[string]string) (*ocv1.ClusterExtensionRevision, error) { + revisionLabels := map[string]string{ + controllers.ClusterExtensionRevisionOwnerLabel: ext.Name, + } + if pkg, ok := revisionAnnotations[labels.PackageNameKey]; ok { + revisionLabels[labels.PackageNameKey] = pkg + } + return &ocv1.ClusterExtensionRevision{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: revisionAnnotations, + Labels: revisionLabels, + }, + Spec: ocv1.ClusterExtensionRevisionSpec{ + Phases: []ocv1.ClusterExtensionRevisionPhase{ + { + Name: string(applier.PhaseDeploy), + Objects: []ocv1.ClusterExtensionRevisionObject{ + { + Object: unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "test-cm", + }, + }, + }, + }, + }, + }, + }, + }, + }, nil + }, + }, + existingObjs: []client.Object{ + ext, + &ocv1.ClusterExtensionRevision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ext-1", + Annotations: map[string]string{ + labels.BundleVersionKey: "1.0.0", + labels.PackageNameKey: "test-package", + }, + Labels: map[string]string{ + controllers.ClusterExtensionRevisionOwnerLabel: ext.Name, + labels.PackageNameKey: "test-package", + }, + }, + Spec: ocv1.ClusterExtensionRevisionSpec{ + Revision: 1, + Phases: []ocv1.ClusterExtensionRevisionPhase{ + { + Name: string(applier.PhaseDeploy), + Objects: []ocv1.ClusterExtensionRevisionObject{ + { + Object: unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "test-cm", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + validate: func(t *testing.T, c client.Client) { + revList := &ocv1.ClusterExtensionRevisionList{} + err := c.List(context.Background(), revList, client.MatchingLabels{controllers.ClusterExtensionRevisionOwnerLabel: ext.Name}) + require.NoError(t, err) + // Should still be only 1 revision (in-place update, not new revision) + require.Len(t, revList.Items, 1) + + rev := revList.Items[0] + assert.Equal(t, "test-ext-1", rev.Name) + assert.Equal(t, int64(1), rev.Spec.Revision) + // Verify annotations were updated + assert.Equal(t, "1.0.1", rev.Annotations[labels.BundleVersionKey]) + assert.Equal(t, "test-package", rev.Annotations[labels.PackageNameKey]) + // Verify labels still have package name + assert.Equal(t, ext.Name, rev.Labels[controllers.ClusterExtensionRevisionOwnerLabel]) + assert.Equal(t, "test-package", rev.Labels[labels.PackageNameKey]) + }, + }, } for _, tc := range testCases { @@ -780,7 +878,15 @@ func TestBoxcutter_Apply(t *testing.T) { testFS := fstest.MapFS{} // Execute - installSucceeded, installStatus, err := boxcutter.Apply(t.Context(), testFS, ext, nil, nil) + revisionAnnotations := map[string]string{} + if tc.name == "annotation-only update (same phases, different annotations)" { + // For annotation-only update 
+				revisionAnnotations = map[string]string{
+					labels.BundleVersionKey: "1.0.1",
+					labels.PackageNameKey:   "test-package",
+				}
+			}
+			installSucceeded, installStatus, err := boxcutter.Apply(t.Context(), testFS, ext, nil, revisionAnnotations)
 
 			// Assert
 			if tc.expectedErr != "" {
@@ -808,6 +914,9 @@ func TestBoxcutter_Apply(t *testing.T) {
 
 func TestBoxcutterStorageMigrator(t *testing.T) {
 	t.Run("creates revision", func(t *testing.T) {
+		testScheme := runtime.NewScheme()
+		require.NoError(t, ocv1.AddToScheme(testScheme))
+
 		brb := &mockBundleRevisionBuilder{}
 		mag := &mockActionGetter{}
 		client := &clientMock{}
@@ -815,6 +924,7 @@ func TestBoxcutterStorageMigrator(t *testing.T) {
 			RevisionGenerator:  brb,
 			ActionClientGetter: mag,
 			Client:             client,
+			Scheme:             testScheme,
 		}
 
 		ext := &ocv1.ClusterExtension{
@@ -836,6 +946,9 @@ func TestBoxcutterStorageMigrator(t *testing.T) {
 	})
 
 	t.Run("does not create revision when revisions exist", func(t *testing.T) {
+		testScheme := runtime.NewScheme()
+		require.NoError(t, ocv1.AddToScheme(testScheme))
+
 		brb := &mockBundleRevisionBuilder{}
 		mag := &mockActionGetter{}
 		client := &clientMock{}
@@ -843,6 +956,7 @@ func TestBoxcutterStorageMigrator(t *testing.T) {
 			RevisionGenerator:  brb,
 			ActionClientGetter: mag,
 			Client:             client,
+			Scheme:             testScheme,
 		}
 
 		ext := &ocv1.ClusterExtension{
@@ -866,6 +980,9 @@ func TestBoxcutterStorageMigrator(t *testing.T) {
 	})
 
 	t.Run("does not create revision when no helm release", func(t *testing.T) {
+		testScheme := runtime.NewScheme()
+		require.NoError(t, ocv1.AddToScheme(testScheme))
+
 		brb := &mockBundleRevisionBuilder{}
 		mag := &mockActionGetter{
 			getClientErr: driver.ErrReleaseNotFound,
 		}
 		client := &clientMock{}
@@ -875,6 +992,7 @@ func TestBoxcutterStorageMigrator(t *testing.T) {
 			RevisionGenerator:  brb,
 			ActionClientGetter: mag,
 			Client:             client,
+			Scheme:             testScheme,
 		}
 
 		ext := &ocv1.ClusterExtension{
@@ -905,7 +1023,15 @@ func (m *mockBundleRevisionBuilder) GenerateRevisionFromHelmRelease(
 	helmRelease *release.Release, ext *ocv1.ClusterExtension, objectLabels map[string]string,
 ) (*ocv1.ClusterExtensionRevision, error) {
-	return nil, nil
+	return &ocv1.ClusterExtensionRevision{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "test-revision",
+			Labels: map[string]string{
+				controllers.ClusterExtensionRevisionOwnerLabel: ext.Name,
+			},
+		},
+		Spec: ocv1.ClusterExtensionRevisionSpec{},
+	}, nil
 }
 
 type clientMock struct {
@@ -921,3 +1047,24 @@ func (m *clientMock) Create(ctx context.Context, obj client.Object, opts ...clie
 	args := m.Called(ctx, obj, opts)
 	return args.Error(0)
 }
+
+func (m *clientMock) Status() client.StatusWriter {
+	return &statusWriterMock{mock: &m.Mock}
+}
+
+type statusWriterMock struct {
+	mock *mock.Mock
+}
+
+func (s *statusWriterMock) Update(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error {
+	// Status updates are expected during migration - return success by default
+	return nil
+}
+
+func (s *statusWriterMock) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error {
+	return nil
+}
+
+func (s *statusWriterMock) Create(ctx context.Context, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error {
+	return nil
+}
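Another reading aid rather than patch content: the new "annotation-only update" test above relies on the rule that identical phases lead to an in-place patch of the existing revision (Spec.Revision stays at 1 and only annotations change), while changed phases produce a new revision. The sketch below makes that rule concrete with a hypothetical needsNewRevision helper and plain reflect.DeepEqual; Boxcutter's real comparison is richer than this.

package main

import (
	"fmt"
	"reflect"
)

// phase is a hypothetical stand-in for the revision phase structure, for illustration only.
type phase struct {
	Name    string
	Objects []string
}

// needsNewRevision reports whether the desired phases differ from the current revision's phases.
// Equal phases, even with different annotations, are handled as an in-place update.
func needsNewRevision(current, desired []phase) bool {
	return !reflect.DeepEqual(current, desired)
}

func main() {
	current := []phase{{Name: "deploy", Objects: []string{"ConfigMap/test-cm"}}}
	desired := []phase{{Name: "deploy", Objects: []string{"ConfigMap/test-cm"}}}
	// false: patch annotations in place and keep the same revision number.
	fmt.Println(needsNewRevision(current, desired))
}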
diff --git a/internal/operator-controller/controllers/clusterextensionrevision_controller.go b/internal/operator-controller/controllers/clusterextensionrevision_controller.go
index 888249161..292b8fef9 100644
--- a/internal/operator-controller/controllers/clusterextensionrevision_controller.go
+++ b/internal/operator-controller/controllers/clusterextensionrevision_controller.go
@@ -16,7 +16,6 @@ import (
 	"k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -116,7 +115,17 @@ func checkForUnexpectedClusterExtensionRevisionFieldChange(a, b ocv1.ClusterExte
 func (c *ClusterExtensionRevisionReconciler) reconcile(ctx context.Context, rev *ocv1.ClusterExtensionRevision) (ctrl.Result, error) {
 	l := log.FromContext(ctx)
 
-	revision, opts, previous := toBoxcutterRevision(rev)
+	revision, opts, err := c.toBoxcutterRevision(ctx, rev)
+	if err != nil {
+		meta.SetStatusCondition(&rev.Status.Conditions, metav1.Condition{
+			Type:               ocv1.ClusterExtensionRevisionTypeAvailable,
+			Status:             metav1.ConditionFalse,
+			Reason:             ocv1.ClusterExtensionRevisionReasonReconcileFailure,
+			Message:            err.Error(),
+			ObservedGeneration: rev.Generation,
+		})
+		return ctrl.Result{}, fmt.Errorf("converting to boxcutter revision: %v", err)
+	}
 
 	if !rev.DeletionTimestamp.IsZero() || rev.Spec.LifecycleState == ocv1.ClusterExtensionRevisionLifecycleStateArchived {
 		return c.teardown(ctx, rev, revision)
@@ -212,7 +221,11 @@ func (c *ClusterExtensionRevisionReconciler) reconcile(ctx context.Context, rev
 	//nolint:nestif
 	if rres.IsComplete() {
-		// Archive other revisions.
+		// Archive previous revisions
+		previous, err := c.ListPreviousRevisions(ctx, rev)
+		if err != nil {
+			return ctrl.Result{}, fmt.Errorf("listing previous revisions: %v", err)
+		}
 		for _, a := range previous {
 			patch := []byte(`{"spec":{"lifecycleState":"Archived"}}`)
 			if err := c.Client.Patch(ctx, a, client.RawPatch(types.MergePatchType, patch)); err != nil {
@@ -428,14 +441,53 @@ func (c *ClusterExtensionRevisionReconciler) removeFinalizer(ctx context.Context
 	return nil
 }
 
-func toBoxcutterRevision(rev *ocv1.ClusterExtensionRevision) (*boxcutter.Revision, []boxcutter.RevisionReconcileOption, []client.Object) {
-	previous := make([]client.Object, 0, len(rev.Spec.Previous))
-	for _, specPrevious := range rev.Spec.Previous {
-		prev := &unstructured.Unstructured{}
-		prev.SetName(specPrevious.Name)
-		prev.SetUID(specPrevious.UID)
-		prev.SetGroupVersionKind(ocv1.GroupVersion.WithKind(ocv1.ClusterExtensionRevisionKind))
-		previous = append(previous, prev)
+// ListPreviousRevisions returns active revisions belonging to the same ClusterExtension with lower revision numbers.
+//
+// Scenario: Given a ClusterExtensionRevision, find all related active previous revisions
+// - When the revision has no owner label, return an empty list (revision not properly labeled)
+// - When looking up by owner label, filter out the current revision itself
+// - When a revision is archived or being deleted, exclude it from the results
+// - When a revision has a higher or equal revision number, exclude it (not a previous revision)
+// - Otherwise, include active and paused revisions with a matching owner
+func (c *ClusterExtensionRevisionReconciler) ListPreviousRevisions(ctx context.Context, rev *ocv1.ClusterExtensionRevision) ([]client.Object, error) {
+	ownerLabel, ok := rev.Labels[ClusterExtensionRevisionOwnerLabel]
+	if !ok {
+		// No owner label means this revision isn't properly labeled; return an empty list
+		return nil, nil
+	}
+
+	revList := &ocv1.ClusterExtensionRevisionList{}
+	if err := c.Client.List(ctx, revList, client.MatchingLabels{
+		ClusterExtensionRevisionOwnerLabel: ownerLabel,
+	}); err != nil {
+		return nil, fmt.Errorf("listing revisions: %w", err)
+	}
+
+	previous := make([]client.Object, 0, len(revList.Items))
+	for i := range revList.Items {
+		r := &revList.Items[i]
+		if r.Name == rev.Name {
+			continue
+		}
+		// Skip archived or deleting revisions
+		if r.Spec.LifecycleState == ocv1.ClusterExtensionRevisionLifecycleStateArchived ||
+			!r.DeletionTimestamp.IsZero() {
+			continue
+		}
+		// Only include revisions with lower revision numbers (actual previous revisions)
+		if r.Spec.Revision >= rev.Spec.Revision {
+			continue
+		}
+		previous = append(previous, r)
+	}
+
+	return previous, nil
+}
+
+func (c *ClusterExtensionRevisionReconciler) toBoxcutterRevision(ctx context.Context, rev *ocv1.ClusterExtensionRevision) (*boxcutter.Revision, []boxcutter.RevisionReconcileOption, error) {
+	previous, err := c.ListPreviousRevisions(ctx, rev)
+	if err != nil {
+		return nil, nil, fmt.Errorf("listing previous revisions: %w", err)
 	}
 
 	opts := []boxcutter.RevisionReconcileOption{
@@ -476,7 +528,7 @@ func toBoxcutterRevision(rev *ocv1.ClusterExtensionRevision) (*boxcutter.Revisio
 	if rev.Spec.LifecycleState == ocv1.ClusterExtensionRevisionLifecycleStatePaused {
 		opts = append(opts, boxcutter.WithPaused{})
 	}
-	return r, opts, previous
+	return r, opts, nil
 }
 
 var (
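One more illustrative sketch, not part of the patch: the filtering that ListPreviousRevisions applies after its label-based List call, restated over a hypothetical plain struct so the rules (skip the revision itself, skip archived or deleting revisions, skip higher-or-equal revision numbers) are easy to see in isolation.

package main

import (
	"fmt"
	"time"
)

// revision is a hypothetical stand-in for ClusterExtensionRevision with only the fields the filter needs.
type revision struct {
	Name     string
	Revision int64
	Archived bool
	Deleting *time.Time // non-nil when a deletion timestamp is set
}

// previousOf keeps only siblings that are older (lower revision number), excluding the revision
// itself as well as archived or deleting siblings.
func previousOf(current revision, siblings []revision) []revision {
	var out []revision
	for _, r := range siblings {
		if r.Name == current.Name || r.Archived || r.Deleting != nil {
			continue
		}
		if r.Revision >= current.Revision {
			continue
		}
		out = append(out, r)
	}
	return out
}

func main() {
	now := time.Now()
	siblings := []revision{
		{Name: "rev-1", Revision: 1},
		{Name: "rev-2", Revision: 2, Deleting: &now},
		{Name: "rev-3", Revision: 3},
	}
	// Only rev-1 survives: rev-2 is being deleted and rev-3 is the current revision.
	fmt.Println(previousOf(revision{Name: "rev-3", Revision: 3}, siblings))
}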
diff --git a/internal/operator-controller/controllers/clusterextensionrevision_controller_test.go b/internal/operator-controller/controllers/clusterextensionrevision_controller_test.go
index 873a6cc74..7cda07d00 100644
--- a/internal/operator-controller/controllers/clusterextensionrevision_controller_test.go
+++ b/internal/operator-controller/controllers/clusterextensionrevision_controller_test.go
@@ -267,10 +267,13 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_RevisionProgression(t *te
 			existingObjs: func() []client.Object {
 				ext := newTestClusterExtension()
 				prevRev1 := newTestClusterExtensionRevision("prev-rev-1")
+				prevRev1.Spec.Revision = 1
 				require.NoError(t, controllerutil.SetControllerReference(ext, prevRev1, testScheme))
 				prevRev2 := newTestClusterExtensionRevision("prev-rev-2")
+				prevRev2.Spec.Revision = 2
 				require.NoError(t, controllerutil.SetControllerReference(ext, prevRev2, testScheme))
 				rev1 := newTestClusterExtensionRevision("test-ext-1")
+				rev1.Spec.Revision = 3
 				rev1.Spec.Previous = []ocv1.ClusterExtensionRevisionPrevious{
 					{
 						Name: "prev-rev-1",
@@ -315,7 +318,7 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_RevisionProgression(t *te
 						return tc.revisionResult, nil
 					},
 				},
-				TrackingCache: &mockTrackingCache{},
+				TrackingCache: &mockTrackingCache{client: testClient},
 			}).Reconcile(t.Context(), ctrl.Request{
 				NamespacedName: types.NamespacedName{
 					Name: clusterExtensionRevisionName,
@@ -431,7 +434,7 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_ValidationError_Retries(t
 						return tc.revisionResult, nil
 					},
 				},
-				TrackingCache: &mockTrackingCache{},
+				TrackingCache: &mockTrackingCache{client: testClient},
 			}).Reconcile(t.Context(), ctrl.Request{
 				NamespacedName: types.NamespacedName{
 					Name: clusterExtensionRevisionName,
@@ -661,7 +664,7 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_Deletion(t *testing.T) {
 					},
 					teardown: tc.revisionEngineTeardownFn(t),
 				},
-				TrackingCache: &mockTrackingCache{},
+				TrackingCache: &mockTrackingCache{client: testClient},
 			}).Reconcile(t.Context(), ctrl.Request{
 				NamespacedName: types.NamespacedName{
 					Name: clusterExtensionRevisionName,
@@ -682,6 +685,145 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_Deletion(t *testing.T) {
 	}
 }
 
+func Test_ClusterExtensionRevisionReconciler_ListPreviousRevisions(t *testing.T) {
+	testScheme := newScheme(t)
+
+	for _, tc := range []struct {
+		name         string
+		existingObjs func() []client.Object
+		currentRev   string
+		expectedRevs []string
+	}{
+		{
+			// Scenario:
+			// - Three revisions belong to the same owner.
+			// - We ask for previous revisions of rev-2.
+			// - Only revisions with lower revision numbers are returned (rev-1).
+			// - Higher revision numbers (rev-3) are excluded.
+			name: "should skip current revision when listing previous",
+			existingObjs: func() []client.Object {
+				ext := newTestClusterExtension()
+				rev1 := newTestClusterExtensionRevision("rev-1")
+				rev2 := newTestClusterExtensionRevision("rev-2")
+				rev3 := newTestClusterExtensionRevision("rev-3")
+				require.NoError(t, controllerutil.SetControllerReference(ext, rev1, testScheme))
+				require.NoError(t, controllerutil.SetControllerReference(ext, rev2, testScheme))
+				require.NoError(t, controllerutil.SetControllerReference(ext, rev3, testScheme))
+				return []client.Object{ext, rev1, rev2, rev3}
+			},
+			currentRev:   "rev-2",
+			expectedRevs: []string{"rev-1"},
+		},
+		{
+			// Scenario:
+			// - One sibling is archived already.
+			// - The caller should not get archived items.
+			// - Only active siblings are returned.
+			name: "should drop archived revisions when listing previous",
+			existingObjs: func() []client.Object {
+				ext := newTestClusterExtension()
+				rev1 := newTestClusterExtensionRevision("rev-1")
+				rev2 := newTestClusterExtensionRevision("rev-2")
+				rev2.Spec.LifecycleState = ocv1.ClusterExtensionRevisionLifecycleStateArchived
+				rev3 := newTestClusterExtensionRevision("rev-3")
+				require.NoError(t, controllerutil.SetControllerReference(ext, rev1, testScheme))
+				require.NoError(t, controllerutil.SetControllerReference(ext, rev2, testScheme))
+				require.NoError(t, controllerutil.SetControllerReference(ext, rev3, testScheme))
+				return []client.Object{ext, rev1, rev2, rev3}
+			},
+			currentRev:   "rev-3",
+			expectedRevs: []string{"rev-1"},
+		},
+		{
+			// Scenario:
+			// - One sibling is being deleted.
+			// - We list previous revisions.
+			// - The deleting one is filtered out.
+ name: "should drop deleting revisions when listing previous", + existingObjs: func() []client.Object { + ext := newTestClusterExtension() + rev1 := newTestClusterExtensionRevision("rev-1") + rev2 := newTestClusterExtensionRevision("rev-2") + rev2.Finalizers = []string{"test-finalizer"} + rev2.DeletionTimestamp = &metav1.Time{Time: time.Now()} + rev3 := newTestClusterExtensionRevision("rev-3") + require.NoError(t, controllerutil.SetControllerReference(ext, rev1, testScheme)) + require.NoError(t, controllerutil.SetControllerReference(ext, rev2, testScheme)) + require.NoError(t, controllerutil.SetControllerReference(ext, rev3, testScheme)) + return []client.Object{ext, rev1, rev2, rev3} + }, + currentRev: "rev-3", + expectedRevs: []string{"rev-1"}, + }, + { + // Scenario: + // - Two different owners have revisions. + // - The owner label is used as the filter. + // - Only siblings with the same owner come back. + name: "should only include revisions matching owner label", + existingObjs: func() []client.Object { + ext := newTestClusterExtension() + ext2 := newTestClusterExtension() + ext2.Name = "test-ext-2" + ext2.UID = "test-ext-2" + + rev1 := newTestClusterExtensionRevision("rev-1") + rev2 := newTestClusterExtensionRevision("rev-2") + rev2.Labels[controllers.ClusterExtensionRevisionOwnerLabel] = "test-ext-2" + rev3 := newTestClusterExtensionRevision("rev-3") + require.NoError(t, controllerutil.SetControllerReference(ext, rev1, testScheme)) + require.NoError(t, controllerutil.SetControllerReference(ext2, rev2, testScheme)) + require.NoError(t, controllerutil.SetControllerReference(ext, rev3, testScheme)) + return []client.Object{ext, ext2, rev1, rev2, rev3} + }, + currentRev: "rev-3", + expectedRevs: []string{"rev-1"}, + }, + { + // Scenario: + // - The revision has no owner label. + // - Without the label we skip the lookup. + // - The function returns an empty list. + name: "should return empty list when owner label missing", + existingObjs: func() []client.Object { + ext := newTestClusterExtension() + rev1 := newTestClusterExtensionRevision("rev-1") + delete(rev1.Labels, controllers.ClusterExtensionRevisionOwnerLabel) + require.NoError(t, controllerutil.SetControllerReference(ext, rev1, testScheme)) + return []client.Object{ext, rev1} + }, + currentRev: "rev-1", + expectedRevs: []string{}, + }, + } { + t.Run(tc.name, func(t *testing.T) { + testClient := fake.NewClientBuilder(). + WithScheme(testScheme). + WithObjects(tc.existingObjs()...). 
+				Build()
+
+			reconciler := &controllers.ClusterExtensionRevisionReconciler{
+				Client:        testClient,
+				TrackingCache: &mockTrackingCache{client: testClient},
+			}
+
+			currentRev := &ocv1.ClusterExtensionRevision{}
+			err := testClient.Get(t.Context(), client.ObjectKey{Name: tc.currentRev}, currentRev)
+			require.NoError(t, err)
+
+			previous, err := reconciler.ListPreviousRevisions(t.Context(), currentRev)
+			require.NoError(t, err)
+
+			var names []string
+			for _, rev := range previous {
+				names = append(names, rev.GetName())
+			}
+
+			require.ElementsMatch(t, tc.expectedRevs, names)
+		})
+	}
+}
+
 func newTestClusterExtension() *ocv1.ClusterExtension {
 	return &ocv1.ClusterExtension{
 		ObjectMeta: metav1.ObjectMeta{
@@ -704,13 +846,22 @@ func newTestClusterExtension() *ocv1.ClusterExtension {
 }
 
 func newTestClusterExtensionRevision(name string) *ocv1.ClusterExtensionRevision {
+	// Extract revision number from name (e.g., "rev-1" -> 1, "test-ext-2" -> 2)
+	revNum := int64(1)
+	if len(name) > 0 && name[len(name)-1] >= '0' && name[len(name)-1] <= '9' {
+		revNum = int64(name[len(name)-1] - '0')
+	}
 	return &ocv1.ClusterExtensionRevision{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:       name,
 			UID:        types.UID(name),
 			Generation: int64(1),
+			Labels: map[string]string{
+				controllers.ClusterExtensionRevisionOwnerLabel: "test-ext",
+			},
 		},
 		Spec: ocv1.ClusterExtensionRevisionSpec{
+			Revision: revNum,
 			Phases: []ocv1.ClusterExtensionRevisionPhase{
 				{
 					Name: "everything",
@@ -879,14 +1030,16 @@ func (m mockRevisionTeardownResult) String() string {
 	return m.string
 }
 
-type mockTrackingCache struct{}
+type mockTrackingCache struct {
+	client client.Client
+}
 
 func (m *mockTrackingCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
-	panic("not implemented")
+	return m.client.Get(ctx, key, obj, opts...)
 }
 
 func (m *mockTrackingCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
-	panic("not implemented")
+	return m.client.List(ctx, list, opts...)
 }
 
 func (m *mockTrackingCache) Source(handler handler.EventHandler, predicates ...predicate.Predicate) source.Source {