From bf529a332e2bff47330e856a8f6bb365fbf3ac6d Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Tue, 9 Sep 2025 00:38:20 +1000 Subject: [PATCH 1/3] Merged from #191 to avoid conflicts Signed-off-by: michaelawyu --- Makefile | 2 + .../workapplier/availability_tracker_test.go | 2 +- .../workapplier/backoff_integration_test.go | 524 +++++++ pkg/controllers/workapplier/backoff_test.go | 68 +- pkg/controllers/workapplier/controller.go | 41 +- ...roller_integration_migrated_helper_test.go | 8 +- .../controller_integration_migrated_test.go | 70 +- .../controller_integration_test.go | 1211 ++++++++++++++--- .../drift_detection_takeover_test.go | 8 +- pkg/controllers/workapplier/preprocess.go | 96 +- .../workapplier/preprocess_test.go | 94 ++ pkg/controllers/workapplier/status.go | 6 +- pkg/controllers/workapplier/status_test.go | 36 +- pkg/controllers/workapplier/suite_test.go | 179 ++- test/e2e/placement_negative_cases_test.go | 194 ++- 15 files changed, 2141 insertions(+), 398 deletions(-) create mode 100644 pkg/controllers/workapplier/backoff_integration_test.go diff --git a/Makefile b/Makefile index 8c7a8e190..8815749f1 100644 --- a/Makefile +++ b/Makefile @@ -137,6 +137,8 @@ test: manifests generate fmt vet local-unit-test integration-test## Run tests. ## workaround to bypass the pkg/controllers/workv1alpha1 tests failure ## rollout controller tests need a bit longer to complete, so we increase the timeout ## +# Set up the timeout parameters as some of the test lengths have exceeded the default 10 minute mark. +# TO-DO (chenyu1): enable parallelization for single package integration tests. .PHONY: local-unit-test local-unit-test: $(ENVTEST) ## Run tests. export CGO_ENABLED=1 && \ diff --git a/pkg/controllers/workapplier/availability_tracker_test.go b/pkg/controllers/workapplier/availability_tracker_test.go index 1127cf612..03a212f83 100644 --- a/pkg/controllers/workapplier/availability_tracker_test.go +++ b/pkg/controllers/workapplier/availability_tracker_test.go @@ -1013,7 +1013,7 @@ func TestTrackInMemberClusterObjAvailabilityByGVR(t *testing.T) { // TestTrackInMemberClusterObjAvailability tests the trackInMemberClusterObjAvailability method. func TestTrackInMemberClusterObjAvailability(t *testing.T) { ctx := context.Background() - workRef := klog.KRef(memberReservedNSName, workName) + workRef := klog.KRef(memberReservedNSName1, workName) availableDeploy := deploy.DeepCopy() availableDeploy.Status = appsv1.DeploymentStatus{ diff --git a/pkg/controllers/workapplier/backoff_integration_test.go b/pkg/controllers/workapplier/backoff_integration_test.go new file mode 100644 index 000000000..cdc857298 --- /dev/null +++ b/pkg/controllers/workapplier/backoff_integration_test.go @@ -0,0 +1,524 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workapplier + +import ( + "fmt" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/utils" + "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" +) + +// Note (chenyu1): all test cases in this file use a separate test environment +// (same hub cluster, different fleet member reserved namespace, different +// work applier instance) from the other integration tests. This is needed +// to (relatively speaking) reliably verify the exponential backoff behavior +// in the work applier. + +var ( + diffObservationTimeChangedActual = func( + workName string, + wantWorkStatus *fleetv1beta1.WorkStatus, + curDiffObservedTime, lastDiffObservedTime *metav1.Time, + ) func() error { + return func() error { + // Retrieve the Work object. + work := &fleetv1beta1.Work{} + if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName2}, work); err != nil { + return fmt.Errorf("failed to retrieve the Work object: %w", err) + } + + // Verify that the status never changed (except for timestamps). + if diff := cmp.Diff( + &work.Status, wantWorkStatus, + ignoreFieldConditionLTTMsg, + ignoreDiffDetailsObsTime, ignoreDriftDetailsObsTime, + cmpopts.SortSlices(lessFuncPatchDetail), + ); diff != "" { + StopTrying("the work object status has changed unexpectedly").Wrap(fmt.Errorf("work status diff (-got, +want):\n%s", diff)).Now() + } + + curDiffObservedTime.Time = work.Status.ManifestConditions[0].DiffDetails.ObservationTime.Time + if !curDiffObservedTime.Equal(lastDiffObservedTime) { + return nil + } + return fmt.Errorf("the diff observation time remains unchanged") + } + } + + driftObservationTimeChangedActual = func( + workName string, + wantWorkStatus *fleetv1beta1.WorkStatus, + curDriftObservedTime, lastDriftObservedTime *metav1.Time, + ) func() error { + return func() error { + // Retrieve the Work object. + work := &fleetv1beta1.Work{} + if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName2}, work); err != nil { + return fmt.Errorf("failed to retrieve the Work object: %w", err) + } + + // Verify that the status never changed (except for timestamps). + if diff := cmp.Diff( + &work.Status, wantWorkStatus, + ignoreFieldConditionLTTMsg, + ignoreDiffDetailsObsTime, ignoreDriftDetailsObsTime, + cmpopts.SortSlices(lessFuncPatchDetail), + ); diff != "" { + StopTrying("the work object status has changed unexpectedly").Wrap(fmt.Errorf("work status diff (-got, +want):\n%s", diff)).Now() + } + + curDriftObservedTime.Time = work.Status.ManifestConditions[0].DriftDetails.ObservationTime.Time + if !curDriftObservedTime.Equal(lastDriftObservedTime) { + return nil + } + return fmt.Errorf("the drift observation time remains unchanged") + } + } +) + +var _ = Describe("exponential backoff", func() { + Context("slow backoff and fast backoff", Ordered, func() { + workName := fmt.Sprintf(workNameTemplate, utils.RandStr()) + // The environment prepared by the envtest package does not support namespace + // deletion; each test case would use a new namespace. 
+ nsName := fmt.Sprintf(nsNameTemplate, utils.RandStr()) + + var regularNS *corev1.Namespace + var lastDiffObservedTime *metav1.Time + + wantWorkStatus := &fleetv1beta1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: condition.WorkNotAllManifestsAppliedReason, + ObservedGeneration: 1, + }, + }, + ManifestConditions: []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "Namespace", + Resource: "namespaces", + Name: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: string(ApplyOrReportDiffResTypeFailedToTakeOver), + ObservedGeneration: 0, + }, + }, + DiffDetails: &fleetv1beta1.DiffDetails{ + ObservedInMemberClusterGeneration: ptr.To(int64(0)), + ObservedDiffs: []fleetv1beta1.PatchDetail{ + { + Path: fmt.Sprintf("/metadata/labels/%s", dummyLabelKey), + ValueInMember: dummyLabelValue2, + ValueInHub: dummyLabelValue1, + }, + }, + }, + }, + }, + } + + BeforeAll(func() { + // Prepare a NS object. + regularNS = ns.DeepCopy() + regularNS.Name = nsName + regularNS.Labels = map[string]string{ + dummyLabelKey: dummyLabelValue1, + } + regularNSJSON := marshalK8sObjJSON(regularNS) + + // Create the NS object on the member cluster, with a different label value. + preExistingNS := ns.DeepCopy() + preExistingNS.Name = nsName + preExistingNS.Labels = map[string]string{ + dummyLabelKey: dummyLabelValue2, + } + Expect(memberClient2.Create(ctx, preExistingNS)).To(Succeed(), "Failed to create pre-existing NS") + + // Create a new Work object with all the manifest JSONs. + // + // This Work object uses an apply strategy that allows takeover but will check for diffs (partial + // comparison). Due to the presence of diffs, the Work object will be considered to be of a state + // of apply op failure. + applyStrategy := &fleetv1beta1.ApplyStrategy{ + WhenToTakeOver: fleetv1beta1.WhenToTakeOverTypeIfNoDiff, + } + createWorkObject(workName, memberReservedNSName2, applyStrategy, regularNSJSON) + }) + + // For simplicity reasons, this test case will skip some of the regular apply op result verification + // (finalizer check, AppliedWork object check, etc.). + + It("should update the Work object status", func() { + // Prepare the status information. + + // Use custom check logic so that the test case can track timestamps across steps. + Eventually(func() error { + // Retrieve the Work object. + work := &fleetv1beta1.Work{} + if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName2}, work); err != nil { + return fmt.Errorf("failed to retrieve the Work object: %w", err) + } + + if diff := cmp.Diff( + &work.Status, wantWorkStatus, + ignoreFieldConditionLTTMsg, + ignoreDiffDetailsObsTime, ignoreDriftDetailsObsTime, + cmpopts.SortSlices(lessFuncPatchDetail), + ); diff != "" { + return fmt.Errorf("work status diff (-got, +want):\n%s", diff) + } + + // Track the observation timestamp of the diff details. + lastDiffObservedTime = &work.Status.ManifestConditions[0].DiffDetails.ObservationTime + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update Work object status") + }) + + It("should wait for the fixed delay period of time before next reconciliation", func() { + curDiffObservedTime := &metav1.Time{} + + // Need to poll a bit longer to avoid flakiness. 
+ Eventually(diffObservationTimeChangedActual(workName, wantWorkStatus, curDiffObservedTime, lastDiffObservedTime), eventuallyDuration*2, eventuallyInterval).Should(Or(Succeed())) + + // Fixed delay is set to 10 seconds. Give a one-sec leeway to avoid flakiness. + Expect(curDiffObservedTime.Sub(lastDiffObservedTime.Time)).To(And( + BeNumerically(">=", time.Second*9), + BeNumerically("<=", time.Second*11), + ), "the interval between two observations is not as expected") + + // Update the tracked observation time. + lastDiffObservedTime = curDiffObservedTime + }) + + It("should start to back off slowly (attempt 1)", func() { + curDiffObservedTime := &metav1.Time{} + + // Need to poll a bit longer to avoid flakiness. + Eventually(diffObservationTimeChangedActual(workName, wantWorkStatus, curDiffObservedTime, lastDiffObservedTime), eventuallyDuration*3, eventuallyInterval).Should(Or(Succeed())) + + // The first slow backoff delay is 20 seconds. Give a one-sec leeway to avoid flakiness. + Expect(curDiffObservedTime.Sub(lastDiffObservedTime.Time)).To(And( + BeNumerically(">=", time.Second*19), + BeNumerically("<=", time.Second*21), + ), "the interval between two observations is not as expected") + + // Update the tracked observation time. + lastDiffObservedTime = curDiffObservedTime + }) + + It("should start to back off slowly (attempt 2)", func() { + curDiffObservedTime := &metav1.Time{} + + // Need to poll a bit longer to avoid flakiness. + Eventually(diffObservationTimeChangedActual(workName, wantWorkStatus, curDiffObservedTime, lastDiffObservedTime), eventuallyDuration*4, eventuallyInterval).Should(Or(Succeed())) + + // The second slow backoff delay is 30 seconds. Give a one-sec leeway to avoid flakiness. + Expect(curDiffObservedTime.Sub(lastDiffObservedTime.Time)).To(And( + BeNumerically(">=", time.Second*29), + BeNumerically("<=", time.Second*31), + ), "the interval between two observations is not as expected") + + // Update the tracked observation time. + lastDiffObservedTime = curDiffObservedTime + }) + + It("should start to back off fast (attempt #1)", func() { + curDiffObservedTime := &metav1.Time{} + + // Need to poll a bit longer to avoid flakiness. + Eventually(diffObservationTimeChangedActual(workName, wantWorkStatus, curDiffObservedTime, lastDiffObservedTime), eventuallyDuration*7, eventuallyInterval).Should(Or(Succeed())) + + // The first fast backoff delay is 60 seconds. Give a one-sec leeway to avoid flakiness. + Expect(curDiffObservedTime.Sub(lastDiffObservedTime.Time)).To(And( + BeNumerically(">=", time.Second*59), + BeNumerically("<=", time.Second*61), + ), "the interval between two observations is not as expected") + + // Update the tracked observation time. + lastDiffObservedTime = curDiffObservedTime + }) + + It("should reach maximum backoff", func() { + curDiffObservedTime := &metav1.Time{} + + // Need to poll a bit longer to avoid flakiness. + Eventually(diffObservationTimeChangedActual(workName, wantWorkStatus, curDiffObservedTime, lastDiffObservedTime), eventuallyDuration*10, eventuallyInterval).Should(Or(Succeed())) + + // The maximum backoff delay is 90 seconds. Give a one-sec leeway to avoid flakiness. + Expect(curDiffObservedTime.Sub(lastDiffObservedTime.Time)).To(And( + BeNumerically(">=", time.Second*89), + BeNumerically("<=", time.Second*91), + ), "the interval between two observations is not as expected") + + // Update the tracked observation time. 
+ lastDiffObservedTime = curDiffObservedTime + }) + + AfterAll(func() { + // Delete the Work object and related resources. + deleteWorkObject(workName, memberReservedNSName2) + + // Ensure that the AppliedWork object has been removed. + appliedWorkRemovedActual := appliedWorkRemovedActual(workName, nsName) + Eventually(appliedWorkRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the AppliedWork object") + + workRemovedActual := workRemovedActual(workName) + Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") + // The environment prepared by the envtest package does not support namespace + // deletion; consequently this test suite would not attempt to verify its deletion. + }) + }) + + Context("skip to fast backoff (available)", Ordered, func() { + workName := fmt.Sprintf(workNameTemplate, utils.RandStr()) + // The environment prepared by the envtest package does not support namespace + // deletion; each test case would use a new namespace. + nsName := fmt.Sprintf(nsNameTemplate, utils.RandStr()) + + var regularNS *corev1.Namespace + var lastDriftObservedTime *metav1.Time + + wantWorkStatus := &fleetv1beta1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: condition.WorkAllManifestsAppliedReason, + ObservedGeneration: 1, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: condition.WorkAllManifestsAvailableReason, + ObservedGeneration: 1, + }, + }, + ManifestConditions: []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "Namespace", + Resource: "namespaces", + Name: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeApplied), + ObservedGeneration: 0, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(AvailabilityResultTypeAvailable), + ObservedGeneration: 0, + }, + }, + DriftDetails: &fleetv1beta1.DriftDetails{ + ObservedInMemberClusterGeneration: 0, + ObservedDrifts: []fleetv1beta1.PatchDetail{ + { + Path: fmt.Sprintf("/metadata/labels/%s", dummyLabelKey), + ValueInMember: dummyLabelValue2, + }, + { + Path: "/spec/finalizers", + ValueInMember: "[kubernetes]", + }, + }, + }, + }, + }, + } + + BeforeAll(func() { + // Prepare a NS object. + regularNS = ns.DeepCopy() + regularNS.Name = nsName + regularNSJSON := marshalK8sObjJSON(regularNS) + + // Create the NS object on the member cluster, with a different label value. + preExistingNS := ns.DeepCopy() + preExistingNS.Name = nsName + preExistingNS.Labels = map[string]string{ + dummyLabelKey: dummyLabelValue2, + } + Expect(memberClient2.Create(ctx, preExistingNS)).To(Succeed(), "Failed to create pre-existing NS") + + // Create a new Work object with all the manifest JSONs. + // + // This Work object uses an apply strategy that always take over and use full + // comparison for drift detection. Apply op will be successful, and namespaces are + // considered to be immediately available after creation; Fleet will report the label + // differences as drifts without blocking the apply ops. 
+ applyStrategy := &fleetv1beta1.ApplyStrategy{ + ComparisonOption: fleetv1beta1.ComparisonOptionTypeFullComparison, + WhenToTakeOver: fleetv1beta1.WhenToTakeOverTypeAlways, + WhenToApply: fleetv1beta1.WhenToApplyTypeAlways, + } + createWorkObject(workName, memberReservedNSName2, applyStrategy, regularNSJSON) + }) + + // For simplicity reasons, this test case will skip some of the regular apply op result verification + // (finalizer check, AppliedWork object check, etc.). + + It("should update the Work object status", func() { + // Prepare the status information. + + // Use custom check logic so that the test case can track timestamps across steps. + Eventually(func() error { + // Retrieve the Work object. + work := &fleetv1beta1.Work{} + if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName2}, work); err != nil { + return fmt.Errorf("failed to retrieve the Work object: %w", err) + } + + if diff := cmp.Diff( + &work.Status, wantWorkStatus, + ignoreFieldConditionLTTMsg, + ignoreDiffDetailsObsTime, ignoreDriftDetailsObsTime, + cmpopts.SortSlices(lessFuncPatchDetail), + ); diff != "" { + return fmt.Errorf("work status diff (-got, +want):\n%s", diff) + } + + // Track the observation timestamp of the drift details. + lastDriftObservedTime = &work.Status.ManifestConditions[0].DriftDetails.ObservationTime + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update Work object status") + }) + + It("should wait for the fixed delay period of time before next reconciliation", func() { + curDriftObservedTime := &metav1.Time{} + + // Need to poll a bit longer to avoid flakiness. + Eventually(driftObservationTimeChangedActual(workName, wantWorkStatus, curDriftObservedTime, lastDriftObservedTime), eventuallyDuration*2, eventuallyInterval).Should(Or(Succeed())) + + // Fixed delay is set to 10 seconds. Give a one-sec leeway to avoid flakiness. + Expect(curDriftObservedTime.Sub(lastDriftObservedTime.Time)).To(And( + BeNumerically(">=", time.Second*9), + BeNumerically("<=", time.Second*11), + ), "the interval between two observations is not as expected") + + // Update the tracked observation time. + lastDriftObservedTime = curDriftObservedTime + }) + + It("should start to back off slowly (attempt #1)", func() { + curDriftObservedTime := &metav1.Time{} + + // Need to poll a bit longer to avoid flakiness. + Eventually(driftObservationTimeChangedActual(workName, wantWorkStatus, curDriftObservedTime, lastDriftObservedTime), eventuallyDuration*3, eventuallyInterval).Should(Or(Succeed())) + + // The first slow backoff delay is 20 seconds. Give a one-sec leeway to avoid flakiness. + Expect(curDriftObservedTime.Sub(lastDriftObservedTime.Time)).To(And( + BeNumerically(">=", time.Second*19), + BeNumerically("<=", time.Second*21), + ), "the interval between two observations is not as expected") + + // Update the tracked observation time. + lastDriftObservedTime = curDriftObservedTime + }) + + It("should skip to fast backoff (attempt #1)", func() { + curDriftObservedTime := &metav1.Time{} + + // Need to poll a bit longer to avoid flakiness. + Eventually(driftObservationTimeChangedActual(workName, wantWorkStatus, curDriftObservedTime, lastDriftObservedTime), eventuallyDuration*5, eventuallyInterval).Should(Or(Succeed())) + + // The first fast backoff delay is 40 seconds. Give a one-sec leeway to avoid flakiness. 
+ Expect(curDriftObservedTime.Sub(lastDriftObservedTime.Time)).To(And( + BeNumerically(">=", time.Second*39), + BeNumerically("<=", time.Second*41), + ), "the interval between two observations is not as expected") + + // Update the tracked observation time. + lastDriftObservedTime = curDriftObservedTime + }) + + It("should skip to fast backoff (attempt #2)", func() { + curDriftObservedTime := &metav1.Time{} + + // Need to poll a bit longer to avoid flakiness. + Eventually(driftObservationTimeChangedActual(workName, wantWorkStatus, curDriftObservedTime, lastDriftObservedTime), eventuallyDuration*9, eventuallyInterval).Should(Or(Succeed())) + + // The second fast backoff delay is 80 seconds. Give a one-sec leeway to avoid flakiness. + Expect(curDriftObservedTime.Sub(lastDriftObservedTime.Time)).To(And( + BeNumerically(">=", time.Second*79), + BeNumerically("<=", time.Second*81), + ), "the interval between two observations is not as expected") + + // Update the tracked observation time. + lastDriftObservedTime = curDriftObservedTime + }) + + It("should reach maximum backoff", func() { + curDriftObservedTime := &metav1.Time{} + + // Need to poll a bit longer to avoid flakiness. + Eventually(driftObservationTimeChangedActual(workName, wantWorkStatus, curDriftObservedTime, lastDriftObservedTime), eventuallyDuration*10, eventuallyInterval).Should(Or(Succeed())) + + // The maximum backoff delay is 90 seconds. Give a one-sec leeway to avoid flakiness. + Expect(curDriftObservedTime.Sub(lastDriftObservedTime.Time)).To(And( + BeNumerically(">=", time.Second*89), + BeNumerically("<=", time.Second*91), + ), "the interval between two observations is not as expected") + + // Update the tracked observation time. + lastDriftObservedTime = curDriftObservedTime + }) + + AfterAll(func() { + // Delete the Work object and related resources. + deleteWorkObject(workName, memberReservedNSName2) + + // Ensure that the AppliedWork object has been removed. + appliedWorkRemovedActual := appliedWorkRemovedActual(workName, nsName) + Eventually(appliedWorkRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the AppliedWork object") + + workRemovedActual := workRemovedActual(workName) + Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") + // The environment prepared by the envtest package does not support namespace + // deletion; consequently this test suite would not attempt to verify its deletion. 
+ }) + }) +}) diff --git a/pkg/controllers/workapplier/backoff_test.go b/pkg/controllers/workapplier/backoff_test.go index fac61b8e6..feecaa768 100644 --- a/pkg/controllers/workapplier/backoff_test.go +++ b/pkg/controllers/workapplier/backoff_test.go @@ -33,7 +33,7 @@ import ( func TestWhenWithFullNormalSequence(t *testing.T) { work := &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, }, } @@ -150,7 +150,7 @@ func TestWhenWithFullNormalSequence(t *testing.T) { func TestWhenWithFullNoSlowBackoffSequence(t *testing.T) { work := &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, }, } @@ -211,7 +211,7 @@ func TestWhenWithFullNoSlowBackoffSequence(t *testing.T) { func TestWhenWithFullNoFastBackoffSequeuce(t *testing.T) { work := &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, }, } @@ -276,7 +276,7 @@ func TestWhenWithFullNoFastBackoffSequeuce(t *testing.T) { func TestWhenWithNoBackoffSequence(t *testing.T) { work := &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, }, } @@ -768,7 +768,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { name: "first requeue", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, }, }, @@ -779,7 +779,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { name: "second requeue", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, }, }, @@ -790,7 +790,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { name: "requeue (#3) w/ gen change", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, Generation: 2, }, @@ -802,7 +802,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { name: "requeue #4", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, Generation: 2, }, @@ -815,7 +815,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { name: "requeue #5", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, Generation: 2, }, @@ -827,7 +827,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { name: "requeue #6", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, Generation: 2, }, @@ -839,7 +839,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { name: "requeue #7 w/ processing result change", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, Generation: 2, }, @@ -855,7 +855,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { name: "requeue #8", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, Generation: 2, }, @@ -871,7 +871,7 @@ func 
TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { name: "requeue #9", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, Generation: 2, }, @@ -887,7 +887,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { name: "requeue #10", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, Generation: 2, }, @@ -903,7 +903,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { name: "requeue #11", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, Generation: 2, }, @@ -919,7 +919,7 @@ func TestWhenWithGenerationAndProcessingResultChange(t *testing.T) { name: "requeue #12 w/ both gen and processing result change", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, Generation: 3, }, @@ -970,7 +970,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { name: "first requeue", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, }, }, @@ -986,7 +986,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { name: "requeue #2, work becomes available", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, }, Status: fleetv1beta1.WorkStatus{ @@ -1010,7 +1010,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { name: "requeue #3, work stays available", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, }, Status: fleetv1beta1.WorkStatus{ @@ -1034,7 +1034,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { name: "requeue #4, work stays available", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, }, Status: fleetv1beta1.WorkStatus{ @@ -1058,7 +1058,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { name: "requeue #5, work stays available", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, }, Status: fleetv1beta1.WorkStatus{ @@ -1082,7 +1082,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { name: "requeue #6, work changed to ReportDiff mode", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, Generation: 2, }, @@ -1107,7 +1107,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { name: "requeue #7, no diff found", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, Generation: 2, }, @@ -1132,7 +1132,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { name: "requeue #8, no diff found", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, Generation: 2, }, @@ -1157,7 +1157,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { name: "requeue #9, no diff found", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, 
+ Namespace: memberReservedNSName1, Name: workName, Generation: 2, }, @@ -1182,7 +1182,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { name: "requeue #9, diff found", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, Generation: 2, }, @@ -1207,7 +1207,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { name: "requeue #10, diff found", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, Generation: 2, }, @@ -1232,7 +1232,7 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { name: "requeue #11, diff found", work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, Generation: 2, }, @@ -1272,15 +1272,15 @@ func TestWhenWithSkipToFastBackoff(t *testing.T) { // TestForget tests the Forget method. func TestForget(t *testing.T) { workNamespacedName1 := types.NamespacedName{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: fmt.Sprintf(workNameTemplate, "1"), } workNamespacedName2 := types.NamespacedName{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: fmt.Sprintf(workNameTemplate, "2"), } workNamespacedName3 := types.NamespacedName{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: fmt.Sprintf(workNameTemplate, "3"), } @@ -1333,7 +1333,7 @@ func TestForget(t *testing.T) { }, work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workNamespacedName2.Name, }, }, @@ -1392,7 +1392,7 @@ func TestForget(t *testing.T) { }, work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workNamespacedName3.Name, }, }, @@ -1452,7 +1452,7 @@ func TestForget(t *testing.T) { func TestComputeProcessingResultHash(t *testing.T) { work := &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Name: workName, }, } diff --git a/pkg/controllers/workapplier/controller.go b/pkg/controllers/workapplier/controller.go index b61ce7316..2aa30bd21 100644 --- a/pkg/controllers/workapplier/controller.go +++ b/pkg/controllers/workapplier/controller.go @@ -352,17 +352,38 @@ const ( ) type manifestProcessingBundle struct { - manifest *fleetv1beta1.Manifest - id *fleetv1beta1.WorkResourceIdentifier - manifestObj *unstructured.Unstructured - inMemberClusterObj *unstructured.Unstructured - gvr *schema.GroupVersionResource + // The manifest data in the raw form (not decoded yet). + manifest *fleetv1beta1.Manifest + // The work resource identifier of the manifest. + // If the manifest data cannot be decoded as a Kubernetes API object at all, the identifier + // will feature only the ordinal of the manifest data (its rank in the list of the resources). + // If the manifest data can be decoded as a Kubernetes API object, but the API is not available + // on the member cluster, the resource field of the identifier will be empty. + id *fleetv1beta1.WorkResourceIdentifier + // A string representation of the work resource identifier (sans the resources field). + // This is only populated if the manifest data can be successfully decoded. 
+ // + // It is of the format `GV=API_GROUP/VERSION, Kind=KIND, Namespace=NAMESPACE, Name=NAME`, + // where API_GROUP, VERSION, KIND, NAMESPACE, and NAME are the API group/version/kind of the + // manifest object, and its owner namespace (if applicable) and name, respectively. + workResourceIdentifierStr string + // The manifest data, decoded as a Kubernetes API object. + manifestObj *unstructured.Unstructured + // The object in the member cluster that corresponds to the manifest object. + inMemberClusterObj *unstructured.Unstructured + // The GVR of the manifest object. + gvr *schema.GroupVersionResource + // The result type of the apply op or the diff reporting op. applyOrReportDiffResTyp ManifestProcessingApplyOrReportDiffResultType - availabilityResTyp ManifestProcessingAvailabilityResultType - applyOrReportDiffErr error - availabilityErr error - drifts []fleetv1beta1.PatchDetail - diffs []fleetv1beta1.PatchDetail + // The result type of the availability check op. + availabilityResTyp ManifestProcessingAvailabilityResultType + // The error that stops the apply op or the diff reporting op. + applyOrReportDiffErr error + // The error that stops the availability check op. + availabilityErr error + // Configuration drifts/diffs detected during the apply op or the diff reporting op. + drifts []fleetv1beta1.PatchDetail + diffs []fleetv1beta1.PatchDetail } // Reconcile implement the control loop logic for Work object. diff --git a/pkg/controllers/workapplier/controller_integration_migrated_helper_test.go b/pkg/controllers/workapplier/controller_integration_migrated_helper_test.go index 5e494080d..0833bd963 100644 --- a/pkg/controllers/workapplier/controller_integration_migrated_helper_test.go +++ b/pkg/controllers/workapplier/controller_integration_migrated_helper_test.go @@ -41,7 +41,7 @@ func createWorkWithManifest(manifest runtime.Object) *fleetv1beta1.Work { newWork := fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ Name: "work-" + utilrand.String(5), - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, }, Spec: fleetv1beta1.WorkSpec{ Workload: fleetv1beta1.WorkloadTemplate{ @@ -59,7 +59,7 @@ func createWorkWithManifest(manifest runtime.Object) *fleetv1beta1.Work { // verifyAppliedConfigMap verifies that the applied CM is the same as the CM we want to apply func verifyAppliedConfigMap(cm *corev1.ConfigMap) *corev1.ConfigMap { var appliedCM corev1.ConfigMap - Expect(memberClient.Get(context.Background(), types.NamespacedName{Name: cm.GetName(), Namespace: cm.GetNamespace()}, &appliedCM)).Should(Succeed()) + Expect(memberClient1.Get(context.Background(), types.NamespacedName{Name: cm.GetName(), Namespace: cm.GetNamespace()}, &appliedCM)).Should(Succeed()) By("Check the config map label") Expect(cmp.Diff(appliedCM.Labels, cm.Labels)).Should(BeEmpty()) @@ -81,7 +81,7 @@ func verifyAppliedConfigMap(cm *corev1.ConfigMap) *corev1.ConfigMap { func waitForWorkToApply(workName string) *fleetv1beta1.Work { var resultWork fleetv1beta1.Work Eventually(func() bool { - err := hubClient.Get(context.Background(), types.NamespacedName{Name: workName, Namespace: memberReservedNSName}, &resultWork) + err := hubClient.Get(context.Background(), types.NamespacedName{Name: workName, Namespace: memberReservedNSName1}, &resultWork) if err != nil { return false } @@ -105,7 +105,7 @@ func waitForWorkToApply(workName string) *fleetv1beta1.Work { func waitForWorkToBeAvailable(workName string) *fleetv1beta1.Work { var resultWork fleetv1beta1.Work Eventually(func() bool { - err := 
hubClient.Get(context.Background(), types.NamespacedName{Name: workName, Namespace: memberReservedNSName}, &resultWork) + err := hubClient.Get(context.Background(), types.NamespacedName{Name: workName, Namespace: memberReservedNSName1}, &resultWork) if err != nil { return false } diff --git a/pkg/controllers/workapplier/controller_integration_migrated_test.go b/pkg/controllers/workapplier/controller_integration_migrated_test.go index 1f25be53c..ebc6a047e 100644 --- a/pkg/controllers/workapplier/controller_integration_migrated_test.go +++ b/pkg/controllers/workapplier/controller_integration_migrated_test.go @@ -108,7 +108,7 @@ var _ = Describe("Work Controller", func() { By("Check applied config map") var configMap corev1.ConfigMap - Expect(memberClient.Get(context.Background(), types.NamespacedName{Name: cmName, Namespace: cmNamespace}, &configMap)).Should(Succeed()) + Expect(memberClient1.Get(context.Background(), types.NamespacedName{Name: cmName, Namespace: cmNamespace}, &configMap)).Should(Succeed()) Expect(cmp.Diff(configMap.Labels, cm.Labels)).Should(BeEmpty()) Expect(cmp.Diff(configMap.Data, cm.Data)).Should(BeEmpty()) @@ -223,11 +223,11 @@ var _ = Describe("Work Controller", func() { // remove label key2 and key3 delete(cm.Labels, "labelKey2") delete(cm.Labels, "labelKey3") - Expect(memberClient.Update(context.Background(), appliedCM)).Should(Succeed()) + Expect(memberClient1.Update(context.Background(), appliedCM)).Should(Succeed()) By("Get the last applied config map and verify it's updated") var modifiedCM corev1.ConfigMap - Expect(memberClient.Get(context.Background(), types.NamespacedName{Name: cm.GetName(), Namespace: cm.GetNamespace()}, &modifiedCM)).Should(Succeed()) + Expect(memberClient1.Get(context.Background(), types.NamespacedName{Name: cm.GetName(), Namespace: cm.GetNamespace()}, &modifiedCM)).Should(Succeed()) Expect(cmp.Diff(appliedCM.Labels, modifiedCM.Labels)).Should(BeEmpty()) Expect(cmp.Diff(appliedCM.Data, modifiedCM.Data)).Should(BeEmpty()) @@ -250,7 +250,7 @@ var _ = Describe("Work Controller", func() { waitForWorkToApply(work.GetName()) By("Get the last applied config map") - Expect(memberClient.Get(context.Background(), types.NamespacedName{Name: cmName, Namespace: cmNamespace}, appliedCM)).Should(Succeed()) + Expect(memberClient1.Get(context.Background(), types.NamespacedName{Name: cmName, Namespace: cmNamespace}, appliedCM)).Should(Succeed()) By("Check the config map data") // data1's value picks up our change @@ -317,7 +317,7 @@ var _ = Describe("Work Controller", func() { By("Check applied TestResource") var appliedTestResource testv1alpha1.TestResource - Expect(memberClient.Get(context.Background(), types.NamespacedName{Name: testResourceName, Namespace: testResourceNamespace}, &appliedTestResource)).Should(Succeed()) + Expect(memberClient1.Get(context.Background(), types.NamespacedName{Name: testResourceName, Namespace: testResourceNamespace}, &appliedTestResource)).Should(Succeed()) By("verify the TestResource spec") Expect(cmp.Diff(appliedTestResource.Spec, testResource.Spec)).Should(BeEmpty()) @@ -338,11 +338,11 @@ var _ = Describe("Work Controller", func() { appliedTestResource.Spec.Items = []string{"a", "b"} appliedTestResource.Spec.Foo = "foo1" appliedTestResource.Spec.Bar = "bar1" - Expect(memberClient.Update(context.Background(), &appliedTestResource)).Should(Succeed()) + Expect(memberClient1.Update(context.Background(), &appliedTestResource)).Should(Succeed()) By("Verify applied TestResource modified") var modifiedTestResource 
testv1alpha1.TestResource - Expect(memberClient.Get(context.Background(), types.NamespacedName{Name: testResourceName, Namespace: testResourceNamespace}, &modifiedTestResource)).Should(Succeed()) + Expect(memberClient1.Get(context.Background(), types.NamespacedName{Name: testResourceName, Namespace: testResourceNamespace}, &modifiedTestResource)).Should(Succeed()) Expect(cmp.Diff(appliedTestResource.Spec, modifiedTestResource.Spec)).Should(BeEmpty()) By("Modify the TestResource") @@ -364,7 +364,7 @@ var _ = Describe("Work Controller", func() { waitForWorkToApply(work.GetName()) By("Get the last applied TestResource") - Expect(memberClient.Get(context.Background(), types.NamespacedName{Name: testResourceName, Namespace: testResourceNamespace}, &appliedTestResource)).Should(Succeed()) + Expect(memberClient1.Get(context.Background(), types.NamespacedName{Name: testResourceName, Namespace: testResourceNamespace}, &appliedTestResource)).Should(Succeed()) By("Check the TestResource spec, its an override for arrays") expectedItems := []string{"a", "b"} @@ -412,11 +412,11 @@ var _ = Describe("Work Controller", func() { By("Delete the last applied annotation from the current resource") delete(appliedCM.Annotations, fleetv1beta1.LastAppliedConfigAnnotation) - Expect(memberClient.Update(ctx, appliedCM)).Should(Succeed()) + Expect(memberClient1.Update(ctx, appliedCM)).Should(Succeed()) By("Get the last applied config map and verify it does not have the last applied annotation") var modifiedCM corev1.ConfigMap - Expect(memberClient.Get(ctx, types.NamespacedName{Name: cm.GetName(), Namespace: cm.GetNamespace()}, &modifiedCM)).Should(Succeed()) + Expect(memberClient1.Get(ctx, types.NamespacedName{Name: cm.GetName(), Namespace: cm.GetNamespace()}, &modifiedCM)).Should(Succeed()) Expect(modifiedCM.Annotations[fleetv1beta1.LastAppliedConfigAnnotation]).Should(BeEmpty()) By("Modify the manifest") @@ -438,7 +438,7 @@ var _ = Describe("Work Controller", func() { waitForWorkToApply(work.GetName()) By("Check applied configMap is modified even without the last applied annotation") - Expect(memberClient.Get(ctx, types.NamespacedName{Name: cmName, Namespace: cmNamespace}, appliedCM)).Should(Succeed()) + Expect(memberClient1.Get(ctx, types.NamespacedName{Name: cmName, Namespace: cmNamespace}, appliedCM)).Should(Succeed()) verifyAppliedConfigMap(cm) Expect(hubClient.Delete(ctx, work)).Should(Succeed(), "Failed to deleted the work") @@ -533,7 +533,7 @@ var _ = Describe("Work Controller", func() { Data: data, } // make sure we can call join as many as possible - Expect(workApplier.Join(ctx)).Should(Succeed()) + Expect(workApplier1.Join(ctx)).Should(Succeed()) work = createWorkWithManifest(cm) err := hubClient.Create(ctx, work) Expect(err).ToNot(HaveOccurred()) @@ -548,7 +548,7 @@ var _ = Describe("Work Controller", func() { By("mark the work controller as leave") Eventually(func() error { - return workApplier.Leave(ctx) + return workApplier1.Leave(ctx) }, eventuallyDuration, eventuallyInterval).Should(Succeed()) By("make sure the manifests have no finalizer and its status match the member cluster") @@ -561,12 +561,12 @@ var _ = Describe("Work Controller", func() { } for i := 0; i < numWork; i++ { var resultWork fleetv1beta1.Work - Expect(hubClient.Get(ctx, types.NamespacedName{Name: works[i].GetName(), Namespace: memberReservedNSName}, &resultWork)).Should(Succeed()) + Expect(hubClient.Get(ctx, types.NamespacedName{Name: works[i].GetName(), Namespace: memberReservedNSName1}, &resultWork)).Should(Succeed()) 
Expect(controllerutil.ContainsFinalizer(&resultWork, fleetv1beta1.WorkFinalizer)).Should(BeFalse()) // make sure that leave can be called as many times as possible // The work may be updated and may hit 409 error. Eventually(func() error { - return workApplier.Leave(ctx) + return workApplier1.Leave(ctx) }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to set the work controller to leave") By(fmt.Sprintf("change the work = %s", work.GetName())) cm = &corev1.ConfigMap{ @@ -591,7 +591,7 @@ var _ = Describe("Work Controller", func() { for i := 0; i < numWork; i++ { By(fmt.Sprintf("updated the work = %s", works[i].GetName())) var resultWork fleetv1beta1.Work - err := hubClient.Get(context.Background(), types.NamespacedName{Name: works[i].GetName(), Namespace: memberReservedNSName}, &resultWork) + err := hubClient.Get(context.Background(), types.NamespacedName{Name: works[i].GetName(), Namespace: memberReservedNSName1}, &resultWork) Expect(err).Should(Succeed()) Expect(controllerutil.ContainsFinalizer(&resultWork, fleetv1beta1.WorkFinalizer)).Should(BeFalse()) applyCond := meta.FindStatusCondition(resultWork.Status.Conditions, fleetv1beta1.WorkConditionTypeApplied) @@ -599,14 +599,14 @@ var _ = Describe("Work Controller", func() { return false } By("check if the config map is not changed") - Expect(memberClient.Get(ctx, types.NamespacedName{Name: cmNames[i], Namespace: cmNamespace}, &configMap)).Should(Succeed()) + Expect(memberClient1.Get(ctx, types.NamespacedName{Name: cmNames[i], Namespace: cmNamespace}, &configMap)).Should(Succeed()) Expect(cmp.Diff(configMap.Data, data)).Should(BeEmpty()) } return true }, eventuallyDuration, eventuallyInterval).Should(BeTrue()) By("enable the work controller again") - Expect(workApplier.Join(ctx)).Should(Succeed()) + Expect(workApplier1.Join(ctx)).Should(Succeed()) By("make sure the work change get picked up") for i := 0; i < numWork; i++ { @@ -614,7 +614,7 @@ var _ = Describe("Work Controller", func() { Expect(len(resultWork.Status.ManifestConditions)).Should(Equal(1)) Expect(meta.IsStatusConditionTrue(resultWork.Status.ManifestConditions[0].Conditions, fleetv1beta1.WorkConditionTypeApplied)).Should(BeTrue()) By("the work is applied, check if the applied config map is updated") - Expect(memberClient.Get(ctx, types.NamespacedName{Name: cmNames[i], Namespace: cmNamespace}, &configMap)).Should(Succeed()) + Expect(memberClient1.Get(ctx, types.NamespacedName{Name: cmNames[i], Namespace: cmNamespace}, &configMap)).Should(Succeed()) Expect(cmp.Diff(configMap.Data, newData)).Should(BeEmpty()) } }) @@ -634,7 +634,7 @@ var _ = Describe("Work Status Reconciler", func() { Name: resourceNamespace, }, } - Expect(memberClient.Create(context.Background(), &rns)).Should(Succeed(), "Failed to create the resource namespace") + Expect(memberClient1.Create(context.Background(), &rns)).Should(Succeed(), "Failed to create the resource namespace") // Create the Work object with some type of Manifest resource. cm = &corev1.ConfigMap{ @@ -668,7 +668,7 @@ var _ = Describe("Work Status Reconciler", func() { work = &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ Name: "work-" + utilrand.String(5), - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, }, Spec: fleetv1beta1.WorkSpec{ Workload: fleetv1beta1.WorkloadTemplate{ @@ -688,7 +688,7 @@ var _ = Describe("Work Status Reconciler", func() { AfterEach(func() { // TODO: Ensure that all resources are being deleted. 
Expect(hubClient.Delete(context.Background(), work)).Should(Succeed()) - Expect(memberClient.Delete(context.Background(), &rns)).Should(Succeed()) + Expect(memberClient1.Delete(context.Background(), &rns)).Should(Succeed()) }) It("Should delete the manifest from the member cluster after it is removed from work", func() { @@ -698,7 +698,7 @@ var _ = Describe("Work Status Reconciler", func() { By("Make sure that the work is applied") currentWork := waitForWorkToApply(work.Name) var appliedWork fleetv1beta1.AppliedWork - Expect(memberClient.Get(context.Background(), types.NamespacedName{Name: work.Name}, &appliedWork)).Should(Succeed()) + Expect(memberClient1.Get(context.Background(), types.NamespacedName{Name: work.Name}, &appliedWork)).Should(Succeed()) Expect(len(appliedWork.Status.AppliedResources)).Should(Equal(2)) By("Remove configMap 2 from the work") @@ -712,12 +712,12 @@ var _ = Describe("Work Status Reconciler", func() { By("Verify that the resource is removed from the cluster") Eventually(func() bool { var configMap corev1.ConfigMap - return apierrors.IsNotFound(memberClient.Get(context.Background(), types.NamespacedName{Name: cm2.Name, Namespace: resourceNamespace}, &configMap)) + return apierrors.IsNotFound(memberClient1.Get(context.Background(), types.NamespacedName{Name: cm2.Name, Namespace: resourceNamespace}, &configMap)) }, eventuallyDuration, eventuallyInterval).Should(BeTrue()) By("Verify that the appliedWork status is correct") Eventually(func() bool { - Expect(memberClient.Get(context.Background(), types.NamespacedName{Name: work.Name}, &appliedWork)).Should(Succeed()) + Expect(memberClient1.Get(context.Background(), types.NamespacedName{Name: work.Name}, &appliedWork)).Should(Succeed()) return len(appliedWork.Status.AppliedResources) == 1 }, eventuallyDuration, eventuallyInterval).Should(BeTrue()) Expect(appliedWork.Status.AppliedResources[0].Name).Should(Equal(cm.GetName())) @@ -734,7 +734,7 @@ var _ = Describe("Work Status Reconciler", func() { By("Make sure that the work is applied") currentWork := waitForWorkToApply(work.Name) var appliedWork fleetv1beta1.AppliedWork - Expect(memberClient.Get(context.Background(), types.NamespacedName{Name: work.Name}, &appliedWork)).Should(Succeed()) + Expect(memberClient1.Get(context.Background(), types.NamespacedName{Name: work.Name}, &appliedWork)).Should(Succeed()) Expect(len(appliedWork.Status.AppliedResources)).Should(Equal(2)) By("replace configMap with a bad object from the work") @@ -759,17 +759,17 @@ var _ = Describe("Work Status Reconciler", func() { By("Verify that the configMaps are removed from the cluster even if the new resource didn't apply") Eventually(func() bool { var configMap corev1.ConfigMap - return apierrors.IsNotFound(memberClient.Get(context.Background(), types.NamespacedName{Name: cm.Name, Namespace: resourceNamespace}, &configMap)) + return apierrors.IsNotFound(memberClient1.Get(context.Background(), types.NamespacedName{Name: cm.Name, Namespace: resourceNamespace}, &configMap)) }, eventuallyDuration, eventuallyInterval).Should(BeTrue()) Eventually(func() bool { var configMap corev1.ConfigMap - return apierrors.IsNotFound(memberClient.Get(context.Background(), types.NamespacedName{Name: cm2.Name, Namespace: resourceNamespace}, &configMap)) + return apierrors.IsNotFound(memberClient1.Get(context.Background(), types.NamespacedName{Name: cm2.Name, Namespace: resourceNamespace}, &configMap)) }, eventuallyDuration, eventuallyInterval).Should(BeTrue()) By("Verify that the appliedWork status is correct") 
Eventually(func() bool { - Expect(memberClient.Get(context.Background(), types.NamespacedName{Name: work.Name}, &appliedWork)).Should(Succeed()) + Expect(memberClient1.Get(context.Background(), types.NamespacedName{Name: work.Name}, &appliedWork)).Should(Succeed()) return len(appliedWork.Status.AppliedResources) == 0 }, eventuallyDuration, eventuallyInterval).Should(BeTrue()) }) @@ -781,14 +781,14 @@ var _ = Describe("Work Status Reconciler", func() { By("Make sure that the work is applied") currentWork := waitForWorkToApply(work.Name) var appliedWork fleetv1beta1.AppliedWork - Expect(memberClient.Get(context.Background(), types.NamespacedName{Name: work.Name}, &appliedWork)).Should(Succeed()) + Expect(memberClient1.Get(context.Background(), types.NamespacedName{Name: work.Name}, &appliedWork)).Should(Succeed()) Expect(len(appliedWork.Status.AppliedResources)).Should(Equal(2)) By("Make sure that the manifests exist on the member cluster") Eventually(func() bool { var configMap corev1.ConfigMap - return memberClient.Get(context.Background(), types.NamespacedName{Name: cm2.Name, Namespace: resourceNamespace}, &configMap) == nil && - memberClient.Get(context.Background(), types.NamespacedName{Name: cm.Name, Namespace: resourceNamespace}, &configMap) == nil + return memberClient1.Get(context.Background(), types.NamespacedName{Name: cm2.Name, Namespace: resourceNamespace}, &configMap) == nil && + memberClient1.Get(context.Background(), types.NamespacedName{Name: cm.Name, Namespace: resourceNamespace}, &configMap) == nil }, eventuallyDuration, eventuallyInterval).Should(BeTrue()) By("Change the order of the two configs in the work") @@ -805,13 +805,13 @@ var _ = Describe("Work Status Reconciler", func() { By("Verify that nothing is removed from the cluster") Consistently(func() bool { var configMap corev1.ConfigMap - return memberClient.Get(context.Background(), types.NamespacedName{Name: cm2.Name, Namespace: resourceNamespace}, &configMap) == nil && - memberClient.Get(context.Background(), types.NamespacedName{Name: cm.Name, Namespace: resourceNamespace}, &configMap) == nil + return memberClient1.Get(context.Background(), types.NamespacedName{Name: cm2.Name, Namespace: resourceNamespace}, &configMap) == nil && + memberClient1.Get(context.Background(), types.NamespacedName{Name: cm.Name, Namespace: resourceNamespace}, &configMap) == nil }, consistentlyDuration, consistentlyInterval).Should(BeTrue()) By("Verify that the appliedWork status is correct") Eventually(func() bool { - Expect(memberClient.Get(context.Background(), types.NamespacedName{Name: work.Name}, &appliedWork)).Should(Succeed()) + Expect(memberClient1.Get(context.Background(), types.NamespacedName{Name: work.Name}, &appliedWork)).Should(Succeed()) return len(appliedWork.Status.AppliedResources) == 2 }, eventuallyDuration, eventuallyInterval).Should(BeTrue()) Expect(appliedWork.Status.AppliedResources[0].Name).Should(Equal(cm2.GetName())) diff --git a/pkg/controllers/workapplier/controller_integration_test.go b/pkg/controllers/workapplier/controller_integration_test.go index 9aacfe71a..e76537c39 100644 --- a/pkg/controllers/workapplier/controller_integration_test.go +++ b/pkg/controllers/workapplier/controller_integration_test.go @@ -75,7 +75,7 @@ var ( ) // createWorkObject creates a new Work object with the given name, manifests, and apply strategy. 
-func createWorkObject(workName string, applyStrategy *fleetv1beta1.ApplyStrategy, rawManifestJSON ...[]byte) { +func createWorkObject(workName, memberClusterReservedNSName string, applyStrategy *fleetv1beta1.ApplyStrategy, rawManifestJSON ...[]byte) { manifests := make([]fleetv1beta1.Manifest, len(rawManifestJSON)) for idx := range rawManifestJSON { manifests[idx] = fleetv1beta1.Manifest{ @@ -88,7 +88,7 @@ func createWorkObject(workName string, applyStrategy *fleetv1beta1.ApplyStrategy work := &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ Name: workName, - Namespace: memberReservedNSName, + Namespace: memberClusterReservedNSName, }, Spec: fleetv1beta1.WorkSpec{ Workload: fleetv1beta1.WorkloadTemplate{ @@ -111,7 +111,7 @@ func updateWorkObject(workName string, applyStrategy *fleetv1beta1.ApplyStrategy } work := &fleetv1beta1.Work{} - Expect(hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName}, work)).To(Succeed()) + Expect(hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName1}, work)).To(Succeed()) work.Spec.Workload.Manifests = manifests work.Spec.ApplyStrategy = applyStrategy @@ -131,7 +131,7 @@ func workFinalizerAddedActual(workName string) func() error { return func() error { // Retrieve the Work object. work := &fleetv1beta1.Work{} - if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName}, work); err != nil { + if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName1}, work); err != nil { return fmt.Errorf("failed to retrieve the Work object: %w", err) } @@ -147,7 +147,7 @@ func appliedWorkCreatedActual(workName string) func() error { return func() error { // Retrieve the AppliedWork object. appliedWork := &fleetv1beta1.AppliedWork{} - if err := memberClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName}, appliedWork); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName1}, appliedWork); err != nil { return fmt.Errorf("failed to retrieve the AppliedWork object: %w", err) } @@ -157,7 +157,7 @@ func appliedWorkCreatedActual(workName string) func() error { }, Spec: fleetv1beta1.AppliedWorkSpec{ WorkName: workName, - WorkNamespace: memberReservedNSName, + WorkNamespace: memberReservedNSName1, }, } if diff := cmp.Diff( @@ -174,7 +174,7 @@ func appliedWorkCreatedActual(workName string) func() error { func prepareAppliedWorkOwnerRef(workName string) *metav1.OwnerReference { // Retrieve the AppliedWork object. appliedWork := &fleetv1beta1.AppliedWork{} - Expect(memberClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName}, appliedWork)).To(Succeed(), "Failed to retrieve the AppliedWork object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName1}, appliedWork)).To(Succeed(), "Failed to retrieve the AppliedWork object") // Prepare the expected OwnerReference. return &metav1.OwnerReference{ @@ -190,7 +190,7 @@ func regularNSObjectAppliedActual(nsName string, appliedWorkOwnerRef *metav1.Own return func() error { // Retrieve the NS object. 
gotNS := &corev1.Namespace{} - if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, gotNS); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, gotNS); err != nil { return fmt.Errorf("failed to retrieve the NS object: %w", err) } @@ -222,7 +222,7 @@ func regularDeploymentObjectAppliedActual(nsName, deployName string, appliedWork return func() error { // Retrieve the Deployment object. gotDeploy := &appsv1.Deployment{} - if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, gotDeploy); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, gotDeploy); err != nil { return fmt.Errorf("failed to retrieve the Deployment object: %w", err) } @@ -285,7 +285,7 @@ func regularClusterRoleObjectAppliedActual(clusterRoleName string, appliedWorkOw return func() error { // Retrieve the ClusterRole object. gotClusterRole := &rbacv1.ClusterRole{} - if err := memberClient.Get(ctx, client.ObjectKey{Name: clusterRoleName}, gotClusterRole); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: clusterRoleName}, gotClusterRole); err != nil { return fmt.Errorf("failed to retrieve the ClusterRole object: %w", err) } @@ -318,7 +318,7 @@ func regularConfigMapObjectAppliedActual(nsName, configMapName string, appliedWo return func() error { // Retrieve the ConfigMap object. gotConfigMap := &corev1.ConfigMap{} - if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: configMapName}, gotConfigMap); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: configMapName}, gotConfigMap); err != nil { return fmt.Errorf("failed to retrieve the ConfigMap object: %w", err) } @@ -351,7 +351,7 @@ func regularConfigMapObjectAppliedActual(nsName, configMapName string, appliedWo func markDeploymentAsAvailable(nsName, deployName string) { // Retrieve the Deployment object. gotDeploy := &appsv1.Deployment{} - Expect(memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, gotDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, gotDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") // Mark the Deployment object as available. now := metav1.Now() @@ -377,7 +377,7 @@ func markDeploymentAsAvailable(nsName, deployName string) { }, }, } - Expect(memberClient.Status().Update(ctx, gotDeploy)).To(Succeed(), "Failed to mark the Deployment object as available") + Expect(memberClient1.Status().Update(ctx, gotDeploy)).To(Succeed(), "Failed to mark the Deployment object as available") } func workStatusUpdated( @@ -390,7 +390,7 @@ func workStatusUpdated( return func() error { // Retrieve the Work object. work := &fleetv1beta1.Work{} - if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName}, work); err != nil { + if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName1}, work); err != nil { return fmt.Errorf("failed to retrieve the Work object: %w", err) } @@ -459,7 +459,7 @@ func appliedWorkStatusUpdated(workName string, appliedResourceMeta []fleetv1beta return func() error { // Retrieve the AppliedWork object. 
appliedWork := &fleetv1beta1.AppliedWork{} - if err := memberClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName}, appliedWork); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName1}, appliedWork); err != nil { return fmt.Errorf("failed to retrieve the AppliedWork object: %w", err) } @@ -478,7 +478,7 @@ func workRemovedActual(workName string) func() error { // Wait for the removal of the Work object. return func() error { work := &fleetv1beta1.Work{} - if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName}, work); !errors.IsNotFound(err) && err != nil { + if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName1}, work); !errors.IsNotFound(err) && err != nil { return fmt.Errorf("work object still exists or an unexpected error occurred: %w", err) } if controllerutil.ContainsFinalizer(work, fleetv1beta1.WorkFinalizer) { @@ -489,12 +489,12 @@ func workRemovedActual(workName string) func() error { } } -func deleteWorkObject(workName string) { +func deleteWorkObject(workName, memberClusterReservedNSName string) { // Retrieve the Work object. work := &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ Name: workName, - Namespace: memberReservedNSName, + Namespace: memberClusterReservedNSName, }, } Expect(hubClient.Delete(ctx, work)).To(Succeed(), "Failed to delete the Work object") @@ -503,7 +503,7 @@ func deleteWorkObject(workName string) { func checkNSOwnerReferences(workName, nsName string) { // Retrieve the AppliedWork object. appliedWork := &fleetv1beta1.AppliedWork{} - Expect(memberClient.Get(ctx, client.ObjectKey{Name: workName}, appliedWork)).To(Succeed(), "Failed to retrieve the AppliedWork object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: workName}, appliedWork)).To(Succeed(), "Failed to retrieve the AppliedWork object") // Check that the Namespace object has the AppliedWork as an owner reference. ns := &corev1.Namespace{ @@ -511,7 +511,7 @@ func checkNSOwnerReferences(workName, nsName string) { Name: nsName, }, } - Expect(memberClient.Get(ctx, client.ObjectKey{Name: nsName}, ns)).To(Succeed(), "Failed to retrieve the Namespace object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, ns)).To(Succeed(), "Failed to retrieve the Namespace object") Expect(ns.OwnerReferences).To(ContainElement(metav1.OwnerReference{ APIVersion: fleetv1beta1.GroupVersion.String(), Kind: "AppliedWork", @@ -525,7 +525,7 @@ func appliedWorkRemovedActual(workName, nsName string) func() error { return func() error { // Retrieve the AppliedWork object. appliedWork := &fleetv1beta1.AppliedWork{} - if err := memberClient.Get(ctx, client.ObjectKey{Name: workName}, appliedWork); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: workName}, appliedWork); err != nil { if errors.IsNotFound(err) { // The AppliedWork object has been deleted, which is expected. return nil @@ -536,7 +536,7 @@ func appliedWorkRemovedActual(workName, nsName string) func() error { // The AppliedWork object is being deleted, but the finalizer is still present. Remove the finalizer as there // are no real built-in controllers in this test environment to handle garbage collection. 
controllerutil.RemoveFinalizer(appliedWork, metav1.FinalizerDeleteDependents) - Expect(memberClient.Update(ctx, appliedWork)).To(Succeed(), "Failed to remove the finalizer from the AppliedWork object") + Expect(memberClient1.Update(ctx, appliedWork)).To(Succeed(), "Failed to remove the finalizer from the AppliedWork object") } return fmt.Errorf("appliedWork object still exists") } @@ -551,11 +551,11 @@ func regularDeployRemovedActual(nsName, deployName string) func() error { Name: deployName, }, } - if err := memberClient.Delete(ctx, deploy); err != nil && !errors.IsNotFound(err) { + if err := memberClient1.Delete(ctx, deploy); err != nil && !errors.IsNotFound(err) { return fmt.Errorf("failed to delete the Deployment object: %w", err) } - if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, deploy); !errors.IsNotFound(err) { + if err := memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, deploy); !errors.IsNotFound(err) { return fmt.Errorf("deployment object still exists or an unexpected error occurred: %w", err) } return nil @@ -570,11 +570,11 @@ func regularClusterRoleRemovedActual(clusterRoleName string) func() error { Name: clusterRoleName, }, } - if err := memberClient.Delete(ctx, clusterRole); err != nil && !errors.IsNotFound(err) { + if err := memberClient1.Delete(ctx, clusterRole); err != nil && !errors.IsNotFound(err) { return fmt.Errorf("failed to delete the ClusterRole object: %w", err) } - if err := memberClient.Get(ctx, client.ObjectKey{Name: clusterRoleName}, clusterRole); !errors.IsNotFound(err) { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: clusterRoleName}, clusterRole); !errors.IsNotFound(err) { return fmt.Errorf("clusterRole object still exists or an unexpected error occurred: %w", err) } return nil @@ -590,12 +590,12 @@ func regularConfigMapRemovedActual(nsName, configMapName string) func() error { Name: configMapName, }, } - if err := memberClient.Delete(ctx, configMap); err != nil && !errors.IsNotFound(err) { + if err := memberClient1.Delete(ctx, configMap); err != nil && !errors.IsNotFound(err) { return fmt.Errorf("failed to delete the ConfigMap object: %w", err) } // Check that the ConfigMap object has been deleted. - if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: configMapName}, configMap); !errors.IsNotFound(err) { + if err := memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: configMapName}, configMap); !errors.IsNotFound(err) { return fmt.Errorf("configMap object still exists or an unexpected error occurred: %w", err) } return nil @@ -606,7 +606,7 @@ func regularNSObjectNotAppliedActual(nsName string) func() error { return func() error { // Retrieve the NS object. 
ns := &corev1.Namespace{} - if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, ns); !errors.IsNotFound(err) { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, ns); !errors.IsNotFound(err) { return fmt.Errorf("namespace object exists or an unexpected error occurred: %w", err) } return nil @@ -622,7 +622,7 @@ func regularDeployNotRemovedActual(nsName, deployName string) func() error { Name: deployName, }, } - if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, deploy); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, deploy); err != nil { return fmt.Errorf("failed to retrieve the Deployment object: %w", err) } return nil @@ -653,7 +653,7 @@ var _ = Describe("applying manifests", func() { regularDeployJSON := marshalK8sObjJSON(regularDeploy) // Create a new Work object with all the manifest JSONs. - createWorkObject(workName, nil, regularNSJSON, regularDeployJSON) + createWorkObject(workName, memberReservedNSName1, nil, regularNSJSON, regularDeployJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -673,13 +673,13 @@ var _ = Describe("applying manifests", func() { regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") - Expect(memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") // Ensure that the Deployment object has been applied as expected. regularDeploymentObjectAppliedActual := regularDeploymentObjectAppliedActual(nsName, deployName, appliedWorkOwnerRef) Eventually(regularDeploymentObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the deployment object") - Expect(memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") }) It("can mark the deployment as available", func() { @@ -790,7 +790,7 @@ var _ = Describe("applying manifests", func() { AfterAll(func() { // Delete the Work object and related resources. - deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Ensure applied manifest has been removed. regularDeployRemovedActual := regularDeployRemovedActual(nsName, deployName) @@ -808,7 +808,7 @@ var _ = Describe("applying manifests", func() { Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. }) }) @@ -835,7 +835,7 @@ var _ = Describe("applying manifests", func() { regularDeployJSON := marshalK8sObjJSON(regularDeploy) // Create a new Work object with all the manifest JSONs. 
- createWorkObject(workName, nil, regularNSJSON, regularDeployJSON) + createWorkObject(workName, memberReservedNSName1, nil, regularNSJSON, regularDeployJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -855,13 +855,13 @@ var _ = Describe("applying manifests", func() { regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") - Expect(memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") // Ensure that the Deployment object has been applied as expected. regularDeploymentObjectAppliedActual := regularDeploymentObjectAppliedActual(nsName, deployName, appliedWorkOwnerRef) Eventually(regularDeploymentObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the deployment object") - Expect(memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") }) It("can mark the deployment as available", func() { @@ -1053,7 +1053,7 @@ var _ = Describe("applying manifests", func() { AfterAll(func() { // Delete the Work object and related resources. - deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Kubebuilder suggests that in a testing environment like this, to check for the existence of the AppliedWork object // OwnerReference in the Namespace object (https://book.kubebuilder.io/reference/envtest.html#testing-considerations). @@ -1066,7 +1066,7 @@ var _ = Describe("applying manifests", func() { workRemovedActual := workRemovedActual(workName) Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. }) }) @@ -1101,7 +1101,7 @@ var _ = Describe("applying manifests", func() { regularDeployJSON := marshalK8sObjJSON(regularDeploy) // Create a new Work object with all the manifest JSONs. - createWorkObject(workName, nil, regularNSJSON, regularDeployJSON) + createWorkObject(workName, memberReservedNSName1, nil, regularNSJSON, regularDeployJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -1121,7 +1121,7 @@ var _ = Describe("applying manifests", func() { Eventually(func() error { // Retrieve the NS object. 
gotNS := &corev1.Namespace{} - if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, gotNS); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, gotNS); err != nil { return fmt.Errorf("failed to retrieve the NS object: %w", err) } @@ -1150,14 +1150,14 @@ var _ = Describe("applying manifests", func() { return nil }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") - Expect(memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") }) It("should not apply the Deployment object", func() { Consistently(func() error { // List all Deployments. gotDeployList := &appsv1.DeploymentList{} - if err := memberClient.List(ctx, gotDeployList, client.InNamespace(nsName)); err != nil { + if err := memberClient1.List(ctx, gotDeployList, client.InNamespace(nsName)); err != nil { return fmt.Errorf("failed to list Deployment objects: %w", err) } @@ -1250,7 +1250,7 @@ var _ = Describe("applying manifests", func() { AfterAll(func() { // Delete the Work object and related resources. - deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Kubebuilder suggests that in a testing environment like this, to check for the existence of the AppliedWork object // OwnerReference in the Namespace object (https://book.kubebuilder.io/reference/envtest.html#testing-considerations). @@ -1264,7 +1264,7 @@ var _ = Describe("applying manifests", func() { Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. }) }) @@ -1301,7 +1301,7 @@ var _ = Describe("applying manifests", func() { regularConfigMapJSON := marshalK8sObjJSON(regularConfigMap) // Create a new Work object with all the manifest JSONs. - createWorkObject(workName, nil, regularNSJSON, decodingErredDeployJSON, regularConfigMapJSON) + createWorkObject(workName, memberReservedNSName1, nil, regularNSJSON, decodingErredDeployJSON, regularConfigMapJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -1321,12 +1321,12 @@ var _ = Describe("applying manifests", func() { regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") - Expect(memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") // Ensure that the ConfigMap object has been applied as expected. 
regularConfigMapObjectAppliedActual := regularConfigMapObjectAppliedActual(nsName, configMapName, appliedWorkOwnerRef) Eventually(regularConfigMapObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the ConfigMap object") - Expect(memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: configMapName}, regularConfigMap)).To(Succeed(), "Failed to retrieve the ConfigMap object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: configMapName}, regularConfigMap)).To(Succeed(), "Failed to retrieve the ConfigMap object") }) It("should update the Work object status", func() { @@ -1442,7 +1442,7 @@ var _ = Describe("applying manifests", func() { AfterAll(func() { // Delete the Work object and related resources. - deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Ensure applied manifest has been removed. regularConfigMapRemovedActual := regularConfigMapRemovedActual(nsName, configMapName) @@ -1460,7 +1460,7 @@ var _ = Describe("applying manifests", func() { Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. }) }) @@ -1489,7 +1489,7 @@ var _ = Describe("applying manifests", func() { malformedConfigMapJSON := marshalK8sObjJSON(malformedConfigMap) // Create a new Work object with all the manifest JSONs and proper apply strategy. - createWorkObject(workName, nil, regularNSJSON, malformedConfigMapJSON) + createWorkObject(workName, memberReservedNSName1, nil, regularNSJSON, malformedConfigMapJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -1508,7 +1508,7 @@ var _ = Describe("applying manifests", func() { Consistently(func() error { configMap := &corev1.ConfigMap{} objKey := client.ObjectKey{Namespace: nsName, Name: malformedConfigMap.Name} - if err := memberClient.Get(ctx, objKey, configMap); !errors.IsNotFound(err) { + if err := memberClient1.Get(ctx, objKey, configMap); !errors.IsNotFound(err) { return fmt.Errorf("the config map exists, or an unexpected error has occurred: %w", err) } return nil @@ -1520,7 +1520,7 @@ var _ = Describe("applying manifests", func() { regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") - Expect(memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") }) It("should update the Work object status", func() { @@ -1605,7 +1605,7 @@ var _ = Describe("applying manifests", func() { AfterAll(func() { // Delete the Work object and related resources. - deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Ensure that the AppliedWork object has been removed. appliedWorkRemovedActual := appliedWorkRemovedActual(workName, nsName) @@ -1650,7 +1650,7 @@ var _ = Describe("work applier garbage collection", func() { regularDeployJSON := marshalK8sObjJSON(regularDeploy) // Create a new Work object with all the manifest JSONs. 
- createWorkObject(workName, &fleetv1beta1.ApplyStrategy{AllowCoOwnership: true}, regularNSJSON, regularDeployJSON) + createWorkObject(workName, memberReservedNSName1, &fleetv1beta1.ApplyStrategy{AllowCoOwnership: true}, regularNSJSON, regularDeployJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -1670,13 +1670,13 @@ var _ = Describe("work applier garbage collection", func() { regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") - Expect(memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") // Ensure that the Deployment object has been applied as expected. regularDeploymentObjectAppliedActual := regularDeploymentObjectAppliedActual(nsName, deployName, appliedWorkOwnerRef) Eventually(regularDeploymentObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the deployment object") - Expect(memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") }) It("can mark the deployment as available", func() { @@ -1788,16 +1788,16 @@ var _ = Describe("work applier garbage collection", func() { It("can update Deployment object to add another owner reference", func() { // Retrieve the Deployment object. gotDeploy := &appsv1.Deployment{} - Expect(memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, gotDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, gotDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") // Add another owner reference to the Deployment object. gotDeploy.OwnerReferences = append(gotDeploy.OwnerReferences, anotherOwnerReference) - Expect(memberClient.Update(ctx, gotDeploy)).To(Succeed(), "Failed to update the Deployment object with another owner reference") + Expect(memberClient1.Update(ctx, gotDeploy)).To(Succeed(), "Failed to update the Deployment object with another owner reference") // Ensure that the Deployment object has been updated as expected. Eventually(func() error { // Retrieve the Deployment object again. - if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, gotDeploy); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, gotDeploy); err != nil { return fmt.Errorf("failed to retrieve the Deployment object: %w", err) } @@ -1819,14 +1819,14 @@ var _ = Describe("work applier garbage collection", func() { It("should start deleting the Work object", func() { // Start deleting the Work object. - deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) }) It("should start deleting the AppliedWork object", func() { // Ensure that the Work object is being deleted. 
Eventually(func() error { appliedWork := &fleetv1beta1.AppliedWork{} - if err := memberClient.Get(ctx, client.ObjectKey{Name: workName}, appliedWork); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: workName}, appliedWork); err != nil { return err } if !appliedWork.DeletionTimestamp.IsZero() && controllerutil.ContainsFinalizer(appliedWork, metav1.FinalizerDeleteDependents) { @@ -1844,7 +1844,7 @@ var _ = Describe("work applier garbage collection", func() { Eventually(func() error { // Retrieve the Deployment object again. gotDeploy := &appsv1.Deployment{} - if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, gotDeploy); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, gotDeploy); err != nil { return fmt.Errorf("failed to retrieve the Deployment object: %w", err) } // Check that the Deployment object has been updated as expected. @@ -1876,13 +1876,13 @@ var _ = Describe("work applier garbage collection", func() { // Ensure that the Deployment object still exists. Consistently(func() error { - return memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy) + return memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy) }, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Deployment object has been removed unexpectedly") // Delete objects created by the test suite so that the next test case can run without issues. - Expect(memberClient.Delete(ctx, regularDeploy)).To(Succeed(), "Failed to delete the Deployment object") + Expect(memberClient1.Delete(ctx, regularDeploy)).To(Succeed(), "Failed to delete the Deployment object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. }) }) @@ -1915,7 +1915,7 @@ var _ = Describe("work applier garbage collection", func() { regularClusterRoleJSON := marshalK8sObjJSON(regularClusterRole) // Create a new Work object with all the manifest JSONs. - createWorkObject(workName, &fleetv1beta1.ApplyStrategy{AllowCoOwnership: true}, regularNSJSON, regularDeployJSON, regularClusterRoleJSON) + createWorkObject(workName, memberReservedNSName1, &fleetv1beta1.ApplyStrategy{AllowCoOwnership: true}, regularNSJSON, regularDeployJSON, regularClusterRoleJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -1935,19 +1935,19 @@ var _ = Describe("work applier garbage collection", func() { regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") - Expect(memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") // Ensure that the Deployment object has been applied as expected. 
regularDeploymentObjectAppliedActual := regularDeploymentObjectAppliedActual(nsName, deployName, appliedWorkOwnerRef) Eventually(regularDeploymentObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the deployment object") - Expect(memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") // Ensure that the ClusterRole object has been applied as expected. regularClusterRoleObjectAppliedActual := regularClusterRoleObjectAppliedActual(clusterRoleName, appliedWorkOwnerRef) Eventually(regularClusterRoleObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the clusterRole object") - Expect(memberClient.Get(ctx, client.ObjectKey{Name: clusterRoleName}, regularClusterRole)).To(Succeed(), "Failed to retrieve the clusterRole object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: clusterRoleName}, regularClusterRole)).To(Succeed(), "Failed to retrieve the clusterRole object") }) It("can mark the deployment as available", func() { @@ -2094,11 +2094,11 @@ var _ = Describe("work applier garbage collection", func() { It("can update ClusterRole object to add another owner reference", func() { // Retrieve the ClusterRole object. gotClusterRole := &rbacv1.ClusterRole{} - Expect(memberClient.Get(ctx, client.ObjectKey{Name: clusterRoleName}, gotClusterRole)).To(Succeed(), "Failed to retrieve the ClusterRole object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: clusterRoleName}, gotClusterRole)).To(Succeed(), "Failed to retrieve the ClusterRole object") // Retrieve the Deployment object. gotDeploy := &appsv1.Deployment{} - Expect(memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, gotDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, gotDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") // Add another owner reference to the ClusterRole object. // Note: This is an invalid owner reference, as it adds a namespace-scoped object as an owner of a cluster-scoped object. @@ -2108,12 +2108,12 @@ var _ = Describe("work applier garbage collection", func() { Name: gotDeploy.Name, UID: gotDeploy.UID, }) - Expect(memberClient.Update(ctx, gotClusterRole)).To(Succeed(), "Failed to update the ClusterRole object with another owner reference") + Expect(memberClient1.Update(ctx, gotClusterRole)).To(Succeed(), "Failed to update the ClusterRole object with another owner reference") // Ensure that the ClusterRole object has been updated as expected. Eventually(func() error { // Retrieve the ClusterRole object again. - if err := memberClient.Get(ctx, client.ObjectKey{Name: clusterRoleName}, gotClusterRole); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: clusterRoleName}, gotClusterRole); err != nil { return fmt.Errorf("failed to retrieve the ClusterRole object: %w", err) } @@ -2135,14 +2135,14 @@ var _ = Describe("work applier garbage collection", func() { It("should start deleting the Work object", func() { // Start deleting the Work object. 
- deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) }) It("should start deleting the AppliedWork object", func() { // Ensure that the Work object is being deleted. Eventually(func() error { appliedWork := &fleetv1beta1.AppliedWork{} - if err := memberClient.Get(ctx, client.ObjectKey{Name: workName}, appliedWork); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: workName}, appliedWork); err != nil { return err } if !appliedWork.DeletionTimestamp.IsZero() && controllerutil.ContainsFinalizer(appliedWork, metav1.FinalizerDeleteDependents) { @@ -2160,7 +2160,7 @@ var _ = Describe("work applier garbage collection", func() { Eventually(func() error { // Retrieve the ClusterRole object again. gotClusterRole := &rbacv1.ClusterRole{} - if err := memberClient.Get(ctx, client.ObjectKey{Name: clusterRoleName}, gotClusterRole); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: clusterRoleName}, gotClusterRole); err != nil { return fmt.Errorf("failed to retrieve the ClusterRole object: %w", err) } @@ -2196,12 +2196,12 @@ var _ = Describe("work applier garbage collection", func() { // Ensure that the ClusterRole object still exists. Consistently(func() error { - return memberClient.Get(ctx, client.ObjectKey{Name: clusterRoleName}, regularClusterRole) + return memberClient1.Get(ctx, client.ObjectKey{Name: clusterRoleName}, regularClusterRole) }, consistentlyDuration, consistentlyInterval).Should(BeNil(), "ClusterRole object has been removed unexpectedly") // Delete objects created by the test suite so that the next test case can run without issues. - Expect(memberClient.Delete(ctx, regularClusterRole)).To(Succeed(), "Failed to delete the clusterRole object") + Expect(memberClient1.Delete(ctx, regularClusterRole)).To(Succeed(), "Failed to delete the clusterRole object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. }) }) @@ -2234,7 +2234,7 @@ var _ = Describe("work applier garbage collection", func() { regularClusterRoleJSON := marshalK8sObjJSON(regularClusterRole) // Create a new Work object with all the manifest JSONs. - createWorkObject(workName, &fleetv1beta1.ApplyStrategy{AllowCoOwnership: true}, regularNSJSON, regularDeployJSON, regularClusterRoleJSON) + createWorkObject(workName, memberReservedNSName1, &fleetv1beta1.ApplyStrategy{AllowCoOwnership: true}, regularNSJSON, regularDeployJSON, regularClusterRoleJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -2254,19 +2254,19 @@ var _ = Describe("work applier garbage collection", func() { regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") - Expect(memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") // Ensure that the Deployment object has been applied as expected. 
regularDeploymentObjectAppliedActual := regularDeploymentObjectAppliedActual(nsName, deployName, appliedWorkOwnerRef) Eventually(regularDeploymentObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the deployment object") - Expect(memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") // Ensure that the ClusterRole object has been applied as expected. regularClusterRoleObjectAppliedActual := regularClusterRoleObjectAppliedActual(clusterRoleName, appliedWorkOwnerRef) Eventually(regularClusterRoleObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the clusterRole object") - Expect(memberClient.Get(ctx, client.ObjectKey{Name: clusterRoleName}, regularClusterRole)).To(Succeed(), "Failed to retrieve the clusterRole object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: clusterRoleName}, regularClusterRole)).To(Succeed(), "Failed to retrieve the clusterRole object") }) It("can mark the deployment as available", func() { @@ -2413,11 +2413,11 @@ var _ = Describe("work applier garbage collection", func() { It("can update Deployment object to add another owner reference", func() { // Retrieve the ClusterRole object. gotClusterRole := &rbacv1.ClusterRole{} - Expect(memberClient.Get(ctx, client.ObjectKey{Name: clusterRoleName}, gotClusterRole)).To(Succeed(), "Failed to retrieve the ClusterRole object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: clusterRoleName}, gotClusterRole)).To(Succeed(), "Failed to retrieve the ClusterRole object") // Retrieve the Deployment object. gotDeploy := &appsv1.Deployment{} - Expect(memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, gotDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, gotDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") // Add another owner reference to the Deployment object. gotDeploy.OwnerReferences = append(gotDeploy.OwnerReferences, metav1.OwnerReference{ @@ -2426,12 +2426,12 @@ var _ = Describe("work applier garbage collection", func() { Name: gotClusterRole.Name, UID: gotClusterRole.UID, }) - Expect(memberClient.Update(ctx, gotDeploy)).To(Succeed(), "Failed to update the Deployment object with another owner reference") + Expect(memberClient1.Update(ctx, gotDeploy)).To(Succeed(), "Failed to update the Deployment object with another owner reference") // Ensure that the Deployment object has been updated as expected. Eventually(func() error { // Retrieve the Deployment object again. - if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, gotDeploy); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, gotDeploy); err != nil { return fmt.Errorf("failed to retrieve the Deployment object: %w", err) } @@ -2453,14 +2453,14 @@ var _ = Describe("work applier garbage collection", func() { It("should start deleting the Work object", func() { // Start deleting the Work object. - deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) }) It("should start deleting the AppliedWork object", func() { // Ensure that the Work object is being deleted. 
Eventually(func() error { appliedWork := &fleetv1beta1.AppliedWork{} - if err := memberClient.Get(ctx, client.ObjectKey{Name: workName}, appliedWork); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: workName}, appliedWork); err != nil { return err } if !appliedWork.DeletionTimestamp.IsZero() && controllerutil.ContainsFinalizer(appliedWork, metav1.FinalizerDeleteDependents) { @@ -2478,7 +2478,7 @@ var _ = Describe("work applier garbage collection", func() { Eventually(func() error { // Retrieve the Deployment object. gotDeploy := &appsv1.Deployment{} - if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, gotDeploy); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, gotDeploy); err != nil { return fmt.Errorf("failed to retrieve the ClusterRole object: %w", err) } @@ -2513,12 +2513,12 @@ var _ = Describe("work applier garbage collection", func() { // Ensure that the Deployment object still exists. Consistently(func() error { - return memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy) + return memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy) }, consistentlyDuration, consistentlyInterval).Should(BeNil(), "Deployment object has been removed unexpectedly") // Delete objects created by the test suite so that the next test case can run without issues. - Expect(memberClient.Delete(ctx, regularDeploy)).To(Succeed(), "Failed to delete the Deployment object") + Expect(memberClient1.Delete(ctx, regularDeploy)).To(Succeed(), "Failed to delete the Deployment object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. }) }) }) @@ -2547,8 +2547,8 @@ var _ = Describe("drift detection and takeover", func() { regularDeployJSON := marshalK8sObjJSON(regularDeploy) // Create the resources on the member cluster side. 
- Expect(memberClient.Create(ctx, regularNS)).To(Succeed(), "Failed to create the NS object") - Expect(memberClient.Create(ctx, regularDeploy)).To(Succeed(), "Failed to create the Deployment object") + Expect(memberClient1.Create(ctx, regularNS)).To(Succeed(), "Failed to create the NS object") + Expect(memberClient1.Create(ctx, regularDeploy)).To(Succeed(), "Failed to create the Deployment object") markDeploymentAsAvailable(nsName, deployName) @@ -2557,7 +2557,7 @@ var _ = Describe("drift detection and takeover", func() { ComparisonOption: fleetv1beta1.ComparisonOptionTypePartialComparison, WhenToTakeOver: fleetv1beta1.WhenToTakeOverTypeIfNoDiff, } - createWorkObject(workName, applyStrategy, regularNSJSON, regularDeployJSON) + createWorkObject(workName, memberReservedNSName1, applyStrategy, regularNSJSON, regularDeployJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -2577,13 +2577,13 @@ var _ = Describe("drift detection and takeover", func() { regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") - Expect(memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") // Ensure that the Deployment object has been applied as expected. regularDeploymentObjectAppliedActual := regularDeploymentObjectAppliedActual(nsName, deployName, appliedWorkOwnerRef) Eventually(regularDeploymentObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the deployment object") - Expect(memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") }) It("can mark the deployment as available", func() { @@ -2692,7 +2692,7 @@ var _ = Describe("drift detection and takeover", func() { AfterAll(func() { // Delete the Work object and related resources. - deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Ensure applied manifest has been removed. regularDeployRemovedActual := regularDeployRemovedActual(nsName, deployName) @@ -2710,7 +2710,7 @@ var _ = Describe("drift detection and takeover", func() { Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. }) }) @@ -2747,8 +2747,8 @@ var _ = Describe("drift detection and takeover", func() { regularDeploy.Spec.Replicas = ptr.To(int32(2)) // Create the resources on the member cluster side. 
- Expect(memberClient.Create(ctx, regularNS)).To(Succeed(), "Failed to create the NS object") - Expect(memberClient.Create(ctx, regularDeploy)).To(Succeed(), "Failed to create the Deployment object") + Expect(memberClient1.Create(ctx, regularNS)).To(Succeed(), "Failed to create the NS object") + Expect(memberClient1.Create(ctx, regularDeploy)).To(Succeed(), "Failed to create the Deployment object") markDeploymentAsAvailable(nsName, deployName) @@ -2757,7 +2757,7 @@ var _ = Describe("drift detection and takeover", func() { ComparisonOption: fleetv1beta1.ComparisonOptionTypePartialComparison, WhenToTakeOver: fleetv1beta1.WhenToTakeOverTypeIfNoDiff, } - createWorkObject(workName, applyStrategy, regularNSJSON, regularDeployJSON) + createWorkObject(workName, memberReservedNSName1, applyStrategy, regularNSJSON, regularDeployJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -2789,7 +2789,7 @@ var _ = Describe("drift detection and takeover", func() { Eventually(func() error { // Retrieve the NS object. - if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS); err != nil { return fmt.Errorf("failed to retrieve the NS object: %w", err) } @@ -2818,7 +2818,7 @@ var _ = Describe("drift detection and takeover", func() { wantDeploy.Spec.Replicas = ptr.To(int32(2)) Consistently(func() error { - if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy); err != nil { return fmt.Errorf("failed to retrieve the Deployment object: %w", err) } @@ -2962,7 +2962,7 @@ var _ = Describe("drift detection and takeover", func() { AfterAll(func() { // Delete the Work object and related resources. - deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Ensure that the Deployment object has been left alone. regularDeployNotRemovedActual := regularDeployNotRemovedActual(nsName, deployName) @@ -2976,7 +2976,7 @@ var _ = Describe("drift detection and takeover", func() { Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. }) }) @@ -3012,8 +3012,8 @@ var _ = Describe("drift detection and takeover", func() { regularDeploy.Spec.Replicas = ptr.To(int32(2)) // Create the resources on the member cluster side. 
- Expect(memberClient.Create(ctx, regularNS)).To(Succeed(), "Failed to create the NS object") - Expect(memberClient.Create(ctx, regularDeploy)).To(Succeed(), "Failed to create the Deployment object") + Expect(memberClient1.Create(ctx, regularNS)).To(Succeed(), "Failed to create the NS object") + Expect(memberClient1.Create(ctx, regularDeploy)).To(Succeed(), "Failed to create the Deployment object") markDeploymentAsAvailable(nsName, deployName) @@ -3022,7 +3022,7 @@ var _ = Describe("drift detection and takeover", func() { ComparisonOption: fleetv1beta1.ComparisonOptionTypeFullComparison, WhenToTakeOver: fleetv1beta1.WhenToTakeOverTypeIfNoDiff, } - createWorkObject(workName, applyStrategy, regularNSJSON, regularDeployJSON) + createWorkObject(workName, memberReservedNSName1, applyStrategy, regularNSJSON, regularDeployJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -3048,7 +3048,7 @@ var _ = Describe("drift detection and takeover", func() { Consistently(func() error { // Retrieve the NS object. - if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS); err != nil { return fmt.Errorf("failed to retrieve the NS object: %w", err) } @@ -3075,7 +3075,7 @@ var _ = Describe("drift detection and takeover", func() { wantDeploy.Spec.Replicas = ptr.To(int32(2)) Consistently(func() error { - if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy); err != nil { return fmt.Errorf("failed to retrieve the Deployment object: %w", err) } @@ -3240,7 +3240,7 @@ var _ = Describe("drift detection and takeover", func() { AfterAll(func() { // Delete the Work object and related resources. - deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Ensure that the Deployment object has been left alone. regularDeployNotRemovedActual := regularDeployNotRemovedActual(nsName, deployName) @@ -3254,7 +3254,7 @@ var _ = Describe("drift detection and takeover", func() { Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. 
}) }) @@ -3285,7 +3285,7 @@ var _ = Describe("drift detection and takeover", func() { ComparisonOption: fleetv1beta1.ComparisonOptionTypePartialComparison, WhenToApply: fleetv1beta1.WhenToApplyTypeIfNotDrifted, } - createWorkObject(workName, applyStrategy, regularNSJSON, regularDeployJSON) + createWorkObject(workName, memberReservedNSName1, applyStrategy, regularNSJSON, regularDeployJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -3305,13 +3305,13 @@ var _ = Describe("drift detection and takeover", func() { regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") - Expect(memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") // Ensure that the Deployment object has been applied as expected. regularDeploymentObjectAppliedActual := regularDeploymentObjectAppliedActual(nsName, deployName, appliedWorkOwnerRef) Eventually(regularDeploymentObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the deployment object") - Expect(memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") }) It("can mark the deployment as available", func() { @@ -3425,7 +3425,7 @@ var _ = Describe("drift detection and takeover", func() { Eventually(func() error { // Retrieve the Deployment object. updatedDeploy := &appsv1.Deployment{} - if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, updatedDeploy); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, updatedDeploy); err != nil { return fmt.Errorf("failed to retrieve the Deployment object: %w", err) } @@ -3433,7 +3433,7 @@ var _ = Describe("drift detection and takeover", func() { updatedDeploy.Spec.Replicas = ptr.To(int32(2)) // Update the Deployment object. - if err := memberClient.Update(ctx, updatedDeploy); err != nil { + if err := memberClient1.Update(ctx, updatedDeploy); err != nil { return fmt.Errorf("failed to update the Deployment object: %w", err) } return nil @@ -3442,7 +3442,7 @@ var _ = Describe("drift detection and takeover", func() { Eventually(func() error { // Retrieve the NS object. updatedNS := &corev1.Namespace{} - if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, updatedNS); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, updatedNS); err != nil { return fmt.Errorf("failed to retrieve the NS object: %w", err) } @@ -3453,7 +3453,7 @@ var _ = Describe("drift detection and takeover", func() { updatedNS.Labels[dummyLabelKey] = dummyLabelValue1 // Update the NS object. - if err := memberClient.Update(ctx, updatedNS); err != nil { + if err := memberClient1.Update(ctx, updatedNS); err != nil { return fmt.Errorf("failed to update the NS object: %w", err) } return nil @@ -3477,7 +3477,7 @@ var _ = Describe("drift detection and takeover", func() { Consistently(func() error { // Retrieve the NS object. 
- if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS); err != nil { return fmt.Errorf("failed to retrieve the NS object: %w", err) } @@ -3509,7 +3509,7 @@ var _ = Describe("drift detection and takeover", func() { wantDeploy.Spec.Replicas = ptr.To(int32(2)) Consistently(func() error { - if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy); err != nil { return fmt.Errorf("failed to retrieve the Deployment object: %w", err) } @@ -3654,7 +3654,7 @@ var _ = Describe("drift detection and takeover", func() { AfterAll(func() { // Delete the Work object and related resources. - deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Ensure that the Deployment object has been left alone. regularDeployNotRemovedActual := regularDeployNotRemovedActual(nsName, deployName) @@ -3672,7 +3672,7 @@ var _ = Describe("drift detection and takeover", func() { Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. }) }) @@ -3698,7 +3698,7 @@ var _ = Describe("drift detection and takeover", func() { ComparisonOption: fleetv1beta1.ComparisonOptionTypeFullComparison, WhenToApply: fleetv1beta1.WhenToApplyTypeIfNotDrifted, } - createWorkObject(workName, applyStrategy, regularNSJSON) + createWorkObject(workName, memberReservedNSName1, applyStrategy, regularNSJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -3718,7 +3718,7 @@ var _ = Describe("drift detection and takeover", func() { regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") - Expect(memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") }) It("should update the Work object status", func() { @@ -3790,7 +3790,7 @@ var _ = Describe("drift detection and takeover", func() { Eventually(func() error { // Retrieve the NS object. updatedNS := &corev1.Namespace{} - if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, updatedNS); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, updatedNS); err != nil { return fmt.Errorf("failed to retrieve the NS object: %w", err) } @@ -3801,7 +3801,7 @@ var _ = Describe("drift detection and takeover", func() { updatedNS.Labels[dummyLabelKey] = dummyLabelValue1 // Update the NS object. - if err := memberClient.Update(ctx, updatedNS); err != nil { + if err := memberClient1.Update(ctx, updatedNS); err != nil { return fmt.Errorf("failed to update the NS object: %w", err) } return nil @@ -3824,7 +3824,7 @@ var _ = Describe("drift detection and takeover", func() { Consistently(func() error { // Retrieve the NS object. 
- if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS); err != nil { return fmt.Errorf("failed to retrieve the NS object: %w", err) } @@ -3900,7 +3900,7 @@ var _ = Describe("drift detection and takeover", func() { AfterAll(func() { // Delete the Work object and related resources. - deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Ensure that the AppliedWork object has been removed. appliedWorkRemovedActual := appliedWorkRemovedActual(workName, nsName) @@ -3910,7 +3910,7 @@ var _ = Describe("drift detection and takeover", func() { Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. }) }) @@ -3938,7 +3938,7 @@ var _ = Describe("drift detection and takeover", func() { ComparisonOption: fleetv1beta1.ComparisonOptionTypePartialComparison, WhenToApply: fleetv1beta1.WhenToApplyTypeAlways, } - createWorkObject(workName, applyStrategy, regularNSJSON) + createWorkObject(workName, memberReservedNSName1, applyStrategy, regularNSJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -3958,7 +3958,7 @@ var _ = Describe("drift detection and takeover", func() { regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") - Expect(memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") }) It("should update the Work object status", func() { @@ -4030,7 +4030,7 @@ var _ = Describe("drift detection and takeover", func() { Eventually(func() error { // Retrieve the NS object. updatedNS := &corev1.Namespace{} - if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, updatedNS); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, updatedNS); err != nil { return fmt.Errorf("failed to retrieve the NS object: %w", err) } @@ -4041,7 +4041,7 @@ var _ = Describe("drift detection and takeover", func() { updatedNS.Labels[dummyLabelKey] = dummyLabelValue2 // Update the NS object. - if err := memberClient.Update(ctx, updatedNS); err != nil { + if err := memberClient1.Update(ctx, updatedNS); err != nil { return fmt.Errorf("failed to update the NS object: %w", err) } return nil @@ -4065,7 +4065,7 @@ var _ = Describe("drift detection and takeover", func() { nsOverwrittenActual := func() error { // Retrieve the NS object. - if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS); err != nil { return fmt.Errorf("failed to retrieve the NS object: %w", err) } @@ -4154,7 +4154,7 @@ var _ = Describe("drift detection and takeover", func() { AfterAll(func() { // Delete the Work object and related resources. 
- deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Kubebuilder suggests that in a testing environment like this, to check for the existence of the AppliedWork object // OwnerReference in the Namespace object (https://book.kubebuilder.io/reference/envtest.html#testing-considerations). @@ -4168,7 +4168,7 @@ var _ = Describe("drift detection and takeover", func() { Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. }) }) @@ -4195,7 +4195,7 @@ var _ = Describe("drift detection and takeover", func() { ComparisonOption: fleetv1beta1.ComparisonOptionTypePartialComparison, WhenToApply: fleetv1beta1.WhenToApplyTypeIfNotDrifted, } - createWorkObject(workName, applyStrategy, marshalK8sObjJSON(regularNS)) + createWorkObject(workName, memberReservedNSName1, applyStrategy, marshalK8sObjJSON(regularNS)) }) It("should add cleanup finalizer to the Work object", func() { @@ -4215,7 +4215,7 @@ var _ = Describe("drift detection and takeover", func() { regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") - Expect(memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") }) It("should update the Work object status", func() { @@ -4287,7 +4287,7 @@ var _ = Describe("drift detection and takeover", func() { Eventually(func() error { // Retrieve the NS object. updatedNS := &corev1.Namespace{} - if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, updatedNS); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, updatedNS); err != nil { return fmt.Errorf("failed to retrieve the NS object: %w", err) } @@ -4298,7 +4298,7 @@ var _ = Describe("drift detection and takeover", func() { updatedNS.Labels[dummyLabelKey] = dummyLabelValue2 // Update the NS object. - if err := memberClient.Update(ctx, updatedNS); err != nil { + if err := memberClient1.Update(ctx, updatedNS); err != nil { return fmt.Errorf("failed to update the NS object: %w", err) } return nil @@ -4321,7 +4321,7 @@ var _ = Describe("drift detection and takeover", func() { Consistently(func() error { // Retrieve the NS object. - if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS); err != nil { return fmt.Errorf("failed to retrieve the NS object: %w", err) } @@ -4428,7 +4428,7 @@ var _ = Describe("drift detection and takeover", func() { Eventually(func() error { // Retrieve the NS object. - if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS); err != nil { return fmt.Errorf("failed to retrieve the NS object: %w", err) } @@ -4515,7 +4515,7 @@ var _ = Describe("drift detection and takeover", func() { AfterAll(func() { // Delete the Work object and related resources. 
- deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Kubebuilder suggests that in a testing environment like this, to check for the existence of the AppliedWork object // OwnerReference in the Namespace object (https://book.kubebuilder.io/reference/envtest.html#testing-considerations). @@ -4529,7 +4529,7 @@ var _ = Describe("drift detection and takeover", func() { Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. }) }) @@ -4556,7 +4556,7 @@ var _ = Describe("drift detection and takeover", func() { ComparisonOption: fleetv1beta1.ComparisonOptionTypePartialComparison, WhenToApply: fleetv1beta1.WhenToApplyTypeIfNotDrifted, } - createWorkObject(workName, applyStrategy, marshalK8sObjJSON(regularNS)) + createWorkObject(workName, memberReservedNSName1, applyStrategy, marshalK8sObjJSON(regularNS)) }) It("should add cleanup finalizer to the Work object", func() { @@ -4576,7 +4576,7 @@ var _ = Describe("drift detection and takeover", func() { regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") - Expect(memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") }) It("should update the Work object status", func() { @@ -4648,7 +4648,7 @@ var _ = Describe("drift detection and takeover", func() { Eventually(func() error { // Retrieve the NS object. updatedNS := &corev1.Namespace{} - if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, updatedNS); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, updatedNS); err != nil { return fmt.Errorf("failed to retrieve the NS object: %w", err) } @@ -4659,7 +4659,7 @@ var _ = Describe("drift detection and takeover", func() { updatedNS.Labels[dummyLabelKey] = dummyLabelValue2 // Update the NS object. - if err := memberClient.Update(ctx, updatedNS); err != nil { + if err := memberClient1.Update(ctx, updatedNS); err != nil { return fmt.Errorf("failed to update the NS object: %w", err) } return nil @@ -4724,7 +4724,7 @@ var _ = Describe("drift detection and takeover", func() { Eventually(func() error { // Retrieve the NS object. updatedNS := &corev1.Namespace{} - if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, updatedNS); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, updatedNS); err != nil { return fmt.Errorf("failed to retrieve the NS object: %w", err) } @@ -4735,7 +4735,7 @@ var _ = Describe("drift detection and takeover", func() { updatedNS.Labels[dummyLabelKey] = dummyLabelValue4 // Update the NS object. - if err := memberClient.Update(ctx, updatedNS); err != nil { + if err := memberClient1.Update(ctx, updatedNS); err != nil { return fmt.Errorf("failed to update the NS object: %w", err) } return nil @@ -4793,7 +4793,7 @@ var _ = Describe("drift detection and takeover", func() { AfterAll(func() { // Delete the Work object and related resources. 
- deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Kubebuilder suggests that in a testing environment like this, to check for the existence of the AppliedWork object // OwnerReference in the Namespace object (https://book.kubebuilder.io/reference/envtest.html#testing-considerations). @@ -4807,7 +4807,7 @@ var _ = Describe("drift detection and takeover", func() { Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. }) }) @@ -4835,13 +4835,13 @@ var _ = Describe("drift detection and takeover", func() { regularNSJSON := marshalK8sObjJSON(regularNS) // Create the resources on the member cluster side. - Expect(memberClient.Create(ctx, regularNS)).To(Succeed(), "Failed to create the NS object") + Expect(memberClient1.Create(ctx, regularNS)).To(Succeed(), "Failed to create the NS object") // Create a new Work object with all the manifest JSONs and proper apply strategy. applyStrategy := &fleetv1beta1.ApplyStrategy{ WhenToTakeOver: fleetv1beta1.WhenToTakeOverTypeNever, } - createWorkObject(workName, applyStrategy, regularNSJSON, marshalK8sObjJSON(regularDeploy)) + createWorkObject(workName, memberReservedNSName1, applyStrategy, regularNSJSON, marshalK8sObjJSON(regularDeploy)) }) It("should add cleanup finalizer to the Work object", func() { @@ -4861,14 +4861,14 @@ var _ = Describe("drift detection and takeover", func() { regularDeploymentObjectAppliedActual := regularDeploymentObjectAppliedActual(nsName, deployName, appliedWorkOwnerRef) Eventually(regularDeploymentObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the deployment object") - Expect(memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") }) It("should not apply the manifests that have corresponding resources", func() { Eventually(func() error { // Retrieve the NS object. updatedNS := &corev1.Namespace{} - if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, updatedNS); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, updatedNS); err != nil { return fmt.Errorf("failed to retrieve the NS object: %w", err) } @@ -4975,7 +4975,7 @@ var _ = Describe("drift detection and takeover", func() { AfterAll(func() { // Delete the Work object and related resources. - deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Ensure applied manifest has been removed. regularDeployRemovedActual := regularDeployRemovedActual(nsName, deployName) @@ -4989,7 +4989,7 @@ var _ = Describe("drift detection and takeover", func() { Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. 
}) }) }) @@ -5014,7 +5014,7 @@ var _ = Describe("report diff", func() { applyStrategy := &fleetv1beta1.ApplyStrategy{ Type: fleetv1beta1.ApplyStrategyTypeReportDiff, } - createWorkObject(workName, applyStrategy, regularNSJSON) + createWorkObject(workName, memberReservedNSName1, applyStrategy, regularNSJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -5085,7 +5085,7 @@ var _ = Describe("report diff", func() { AfterAll(func() { // Delete the Work object and related resources. - deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Ensure that the AppliedWork object has been removed. appliedWorkRemovedActual := appliedWorkRemovedActual(workName, nsName) @@ -5095,7 +5095,7 @@ var _ = Describe("report diff", func() { Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. }) }) @@ -5122,18 +5122,18 @@ var _ = Describe("report diff", func() { regularDeployJSON := marshalK8sObjJSON(regularDeploy) // Create the objects first in the member cluster. - Expect(memberClient.Create(ctx, regularNS)).To(Succeed(), "Failed to create the NS object") + Expect(memberClient1.Create(ctx, regularNS)).To(Succeed(), "Failed to create the NS object") // Create a diff in the replica count field. regularDeploy.Spec.Replicas = ptr.To(int32(2)) - Expect(memberClient.Create(ctx, regularDeploy)).To(Succeed(), "Failed to create the Deployment object") + Expect(memberClient1.Create(ctx, regularDeploy)).To(Succeed(), "Failed to create the Deployment object") // Create a new Work object with all the manifest JSONs and proper apply strategy. applyStrategy := &fleetv1beta1.ApplyStrategy{ ComparisonOption: fleetv1beta1.ComparisonOptionTypePartialComparison, Type: fleetv1beta1.ApplyStrategyTypeReportDiff, } - createWorkObject(workName, applyStrategy, regularNSJSON, regularDeployJSON) + createWorkObject(workName, memberReservedNSName1, applyStrategy, regularNSJSON, regularDeployJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -5161,7 +5161,7 @@ var _ = Describe("report diff", func() { wantDeploy.Spec.Replicas = ptr.To(int32(2)) deployOwnedButNotApplied := func() error { - if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy); err != nil { return fmt.Errorf("failed to retrieve the Deployment object: %w", err) } @@ -5221,7 +5221,7 @@ var _ = Describe("report diff", func() { } nsOwnedButNotApplied := func() error { - if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS); err != nil { return fmt.Errorf("failed to retrieve the NS object: %w", err) } @@ -5322,7 +5322,7 @@ var _ = Describe("report diff", func() { Eventually(func() error { // Retrieve the Deployment object. 
updatedDeploy := &appsv1.Deployment{} - if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, updatedDeploy); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, updatedDeploy); err != nil { return fmt.Errorf("failed to retrieve the Deployment object: %w", err) } @@ -5330,7 +5330,7 @@ var _ = Describe("report diff", func() { updatedDeploy.Spec.Replicas = ptr.To(int32(1)) // Update the Deployment object. - if err := memberClient.Update(ctx, updatedDeploy); err != nil { + if err := memberClient1.Update(ctx, updatedDeploy); err != nil { return fmt.Errorf("failed to update the Deployment object: %w", err) } return nil @@ -5409,7 +5409,7 @@ var _ = Describe("report diff", func() { AfterAll(func() { // Delete the Work object and related resources. - deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Ensure that the Deployment object has been left alone. regularDeployNotRemovedActual := regularDeployNotRemovedActual(nsName, deployName) @@ -5423,7 +5423,7 @@ var _ = Describe("report diff", func() { Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. }) }) @@ -5449,11 +5449,11 @@ var _ = Describe("report diff", func() { regularDeployJSON := marshalK8sObjJSON(regularDeploy) // Create the objects first in the member cluster. - Expect(memberClient.Create(ctx, regularNS)).To(Succeed(), "Failed to create the NS object") + Expect(memberClient1.Create(ctx, regularNS)).To(Succeed(), "Failed to create the NS object") // Create a diff in the replica count field. regularDeploy.Spec.Replicas = ptr.To(int32(2)) - Expect(memberClient.Create(ctx, regularDeploy)).To(Succeed(), "Failed to create the Deployment object") + Expect(memberClient1.Create(ctx, regularDeploy)).To(Succeed(), "Failed to create the Deployment object") // Create a new Work object with all the manifest JSONs and proper apply strategy. applyStrategy := &fleetv1beta1.ApplyStrategy{ @@ -5461,7 +5461,7 @@ var _ = Describe("report diff", func() { Type: fleetv1beta1.ApplyStrategyTypeReportDiff, WhenToTakeOver: fleetv1beta1.WhenToTakeOverTypeNever, } - createWorkObject(workName, applyStrategy, regularNSJSON, regularDeployJSON) + createWorkObject(workName, memberReservedNSName1, applyStrategy, regularNSJSON, regularDeployJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -5479,7 +5479,7 @@ var _ = Describe("report diff", func() { Consistently(func() error { // Retrieve the NS object. updatedNS := &corev1.Namespace{} - if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, updatedNS); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, updatedNS); err != nil { return fmt.Errorf("failed to retrieve the NS object: %w", err) } @@ -5503,7 +5503,7 @@ var _ = Describe("report diff", func() { Consistently(func() error { // Retrieve the Deployment object. 
updatedDeploy := &appsv1.Deployment{} - if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, updatedDeploy); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, updatedDeploy); err != nil { return fmt.Errorf("failed to retrieve the Deployment object: %w", err) } @@ -5624,7 +5624,7 @@ var _ = Describe("report diff", func() { AfterAll(func() { // Delete the Work object and related resources. - deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Ensure applied manifest has been removed. regularDeployRemovedActual := regularDeployRemovedActual(nsName, deployName) @@ -5638,7 +5638,7 @@ var _ = Describe("report diff", func() { Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. }) }) @@ -5670,7 +5670,7 @@ var _ = Describe("report diff", func() { applyStrategy := &fleetv1beta1.ApplyStrategy{ Type: fleetv1beta1.ApplyStrategyTypeReportDiff, } - createWorkObject(workName, applyStrategy, regularNSJSON, malformedConfigMapJSON) + createWorkObject(workName, memberReservedNSName1, applyStrategy, regularNSJSON, malformedConfigMapJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -5689,7 +5689,7 @@ var _ = Describe("report diff", func() { Consistently(func() error { configMap := &corev1.ConfigMap{} objKey := client.ObjectKey{Namespace: nsName, Name: malformedConfigMap.Name} - if err := memberClient.Get(ctx, objKey, configMap); !errors.IsNotFound(err) { + if err := memberClient1.Get(ctx, objKey, configMap); !errors.IsNotFound(err) { return fmt.Errorf("the config map exists, or an unexpected error has occurred: %w", err) } return nil @@ -5768,7 +5768,7 @@ var _ = Describe("report diff", func() { AfterAll(func() { // Delete the Work object and related resources. - deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Ensure that the AppliedWork object has been removed. appliedWorkRemovedActual := appliedWorkRemovedActual(workName, nsName) @@ -5807,18 +5807,18 @@ var _ = Describe("handling different apply strategies", func() { regularDeployJSON := marshalK8sObjJSON(regularDeploy) // Create the objects first in the member cluster. - Expect(memberClient.Create(ctx, regularNS)).To(Succeed(), "Failed to create the NS object") + Expect(memberClient1.Create(ctx, regularNS)).To(Succeed(), "Failed to create the NS object") // Create a diff in the replica count field. regularDeploy.Spec.Replicas = ptr.To(int32(2)) - Expect(memberClient.Create(ctx, regularDeploy)).To(Succeed(), "Failed to create the Deployment object") + Expect(memberClient1.Create(ctx, regularDeploy)).To(Succeed(), "Failed to create the Deployment object") // Create a new Work object with all the manifest JSONs and proper apply strategy. 
applyStrategy := &fleetv1beta1.ApplyStrategy{ ComparisonOption: fleetv1beta1.ComparisonOptionTypePartialComparison, Type: fleetv1beta1.ApplyStrategyTypeReportDiff, } - createWorkObject(workName, applyStrategy, regularNSJSON, regularDeployJSON) + createWorkObject(workName, memberReservedNSName1, applyStrategy, regularNSJSON, regularDeployJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -5846,7 +5846,7 @@ var _ = Describe("handling different apply strategies", func() { wantDeploy.Spec.Replicas = ptr.To(int32(2)) deployOwnedButNotApplied := func() error { - if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy); err != nil { return fmt.Errorf("failed to retrieve the Deployment object: %w", err) } @@ -5906,7 +5906,7 @@ var _ = Describe("handling different apply strategies", func() { } nsOwnedButNotApplied := func() error { - if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS); err != nil { return fmt.Errorf("failed to retrieve the NS object: %w", err) } @@ -6005,7 +6005,7 @@ var _ = Describe("handling different apply strategies", func() { It("can update the apply strategy", func() { Eventually(func() error { work := &fleetv1beta1.Work{} - if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName}, work); err != nil { + if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName1}, work); err != nil { return fmt.Errorf("failed to retrieve the Work object: %w", err) } @@ -6126,7 +6126,7 @@ var _ = Describe("handling different apply strategies", func() { AfterAll(func() { // Delete the Work object and related resources. - deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Ensure applied manifest has been removed. regularDeployRemovedActual := regularDeployRemovedActual(nsName, deployName) @@ -6144,7 +6144,7 @@ var _ = Describe("handling different apply strategies", func() { Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. 
}) }) @@ -6175,7 +6175,7 @@ var _ = Describe("handling different apply strategies", func() { ComparisonOption: fleetv1beta1.ComparisonOptionTypePartialComparison, Type: fleetv1beta1.ApplyStrategyTypeServerSideApply, } - createWorkObject(workName, applyStrategy, regularNSJSON, regularDeployJSON) + createWorkObject(workName, memberReservedNSName1, applyStrategy, regularNSJSON, regularDeployJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -6195,13 +6195,13 @@ var _ = Describe("handling different apply strategies", func() { regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") - Expect(memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") // Ensure that the Deployment object has been applied as expected. regularDeploymentObjectAppliedActual := regularDeploymentObjectAppliedActual(nsName, deployName, appliedWorkOwnerRef) Eventually(regularDeploymentObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the deployment object") - Expect(memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, regularDeploy)).To(Succeed(), "Failed to retrieve the Deployment object") }) It("should update the AppliedWork object status", func() { @@ -6309,7 +6309,7 @@ var _ = Describe("handling different apply strategies", func() { It("can update the apply strategy", func() { Eventually(func() error { work := &fleetv1beta1.Work{} - if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName}, work); err != nil { + if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName1}, work); err != nil { return fmt.Errorf("failed to retrieve the Work object: %w", err) } @@ -6387,7 +6387,7 @@ var _ = Describe("handling different apply strategies", func() { AfterAll(func() { // Delete the Work object and related resources. - deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Ensure applied manifest has been removed. regularDeployRemovedActual := regularDeployRemovedActual(nsName, deployName) @@ -6405,7 +6405,7 @@ var _ = Describe("handling different apply strategies", func() { Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. }) }) @@ -6433,10 +6433,10 @@ var _ = Describe("handling different apply strategies", func() { // Create objects in the member cluster. 
preExistingNS := regularNS.DeepCopy() - Expect(memberClient.Create(ctx, preExistingNS)).To(Succeed(), "Failed to create the NS object") + Expect(memberClient1.Create(ctx, preExistingNS)).To(Succeed(), "Failed to create the NS object") preExistingDeploy := regularDeploy.DeepCopy() preExistingDeploy.Spec.Replicas = ptr.To(int32(2)) - Expect(memberClient.Create(ctx, preExistingDeploy)).To(Succeed(), "Failed to create the Deployment object") + Expect(memberClient1.Create(ctx, preExistingDeploy)).To(Succeed(), "Failed to create the Deployment object") // Create a new Work object with all the manifest JSONs and proper apply strategy. applyStrategy := &fleetv1beta1.ApplyStrategy{ @@ -6444,7 +6444,7 @@ var _ = Describe("handling different apply strategies", func() { Type: fleetv1beta1.ApplyStrategyTypeClientSideApply, WhenToTakeOver: fleetv1beta1.WhenToTakeOverTypeNever, } - createWorkObject(workName, applyStrategy, regularNSJSON, regularDeployJSON) + createWorkObject(workName, memberReservedNSName1, applyStrategy, regularNSJSON, regularDeployJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -6467,7 +6467,7 @@ var _ = Describe("handling different apply strategies", func() { Consistently(func() error { preExistingNS := &corev1.Namespace{} - if err := memberClient.Get(ctx, client.ObjectKey{Name: nsName}, preExistingNS); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, preExistingNS); err != nil { return fmt.Errorf("failed to retrieve the NS object: %w", err) } @@ -6493,7 +6493,7 @@ var _ = Describe("handling different apply strategies", func() { Consistently(func() error { preExistingDeploy := &appsv1.Deployment{} - if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, preExistingDeploy); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, preExistingDeploy); err != nil { return fmt.Errorf("failed to retrieve the Deployment object: %w", err) } @@ -6598,7 +6598,7 @@ var _ = Describe("handling different apply strategies", func() { It("can update the apply strategy", func() { Eventually(func() error { work := &fleetv1beta1.Work{} - if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName}, work); err != nil { + if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName1}, work); err != nil { return fmt.Errorf("failed to retrieve the Work object: %w", err) } @@ -6619,7 +6619,7 @@ var _ = Describe("handling different apply strategies", func() { regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") - Expect(memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") }) It("should not take over some objects", func() { @@ -6632,7 +6632,7 @@ var _ = Describe("handling different apply strategies", func() { Consistently(func() error { preExistingDeploy := &appsv1.Deployment{} - if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, preExistingDeploy); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: deployName}, preExistingDeploy); err != nil { return fmt.Errorf("failed to 
retrieve the Deployment object: %w", err) } @@ -6772,7 +6772,7 @@ var _ = Describe("handling different apply strategies", func() { AfterAll(func() { // Delete the Work object and related resources. - deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Ensure applied manifest has been removed. regularDeployRemovedActual := regularDeployRemovedActual(nsName, deployName) @@ -6790,7 +6790,7 @@ var _ = Describe("handling different apply strategies", func() { Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") // The environment prepared by the envtest package does not support namespace - // deletion; consequently this test suite would not attempt so verify its deletion. + // deletion; consequently this test suite would not attempt to verify its deletion. }) }) @@ -6843,7 +6843,7 @@ var _ = Describe("handling different apply strategies", func() { oversizedCMJSON := marshalK8sObjJSON(oversizedCM) // Create a new Work object with all the manifest JSONs and proper apply strategy. - createWorkObject(workName, nil, regularNSJSON, oversizedCMJSON) + createWorkObject(workName, memberReservedNSName1, nil, regularNSJSON, oversizedCMJSON) }) It("should add cleanup finalizer to the Work object", func() { @@ -6863,12 +6863,12 @@ var _ = Describe("handling different apply strategies", func() { regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") - Expect(memberClient.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") // Ensure that the oversized ConfigMap object has been applied as expected via SSA. Eventually(func() error { gotConfigMap := &corev1.ConfigMap{} - if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: configMapName}, gotConfigMap); err != nil { + if err := memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: configMapName}, gotConfigMap); err != nil { return fmt.Errorf("failed to retrieve the ConfigMap object: %w", err) } @@ -6913,7 +6913,7 @@ var _ = Describe("handling different apply strategies", func() { return nil }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the oversized configMap object") - Expect(memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: configMapName}, oversizedCM)).To(Succeed(), "Failed to retrieve the ConfigMap object") + Expect(memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: configMapName}, oversizedCM)).To(Succeed(), "Failed to retrieve the ConfigMap object") }) It("should update the Work object status", func() { @@ -6988,7 +6988,7 @@ var _ = Describe("handling different apply strategies", func() { AfterAll(func() { // Delete the Work object and related resources. - deleteWorkObject(workName) + deleteWorkObject(workName, memberReservedNSName1) // Ensure that all applied manifests have been removed. 
appliedWorkRemovedActual := appliedWorkRemovedActual(workName, nsName) @@ -7002,16 +7002,795 @@ var _ = Describe("handling different apply strategies", func() { Name: configMapName, }, } - if err := memberClient.Delete(ctx, cm); err != nil && !errors.IsNotFound(err) { + if err := memberClient1.Delete(ctx, cm); err != nil && !errors.IsNotFound(err) { return fmt.Errorf("failed to delete the ConfigMap object: %w", err) } - if err := memberClient.Get(ctx, client.ObjectKey{Namespace: nsName, Name: configMapName}, cm); !errors.IsNotFound(err) { + if err := memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: configMapName}, cm); !errors.IsNotFound(err) { return fmt.Errorf("the ConfigMap object still exists or an unexpected error occurred: %w", err) } return nil }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the oversized configMap object") + // The environment prepared by the envtest package does not support namespace + // deletion; consequently this test suite would not attempt to verify its deletion. + }) + }) +}) + +var _ = Describe("negative cases", func() { + Context("decoding error", Ordered, func() { + workName := fmt.Sprintf(workNameTemplate, utils.RandStr()) + // The environment prepared by the envtest package does not support namespace + // deletion; each test case would use a new namespace. + nsName := fmt.Sprintf(nsNameTemplate, utils.RandStr()) + + var appliedWorkOwnerRef *metav1.OwnerReference + var regularNS *corev1.Namespace + var regularConfigMap *corev1.ConfigMap + + BeforeAll(func() { + // Prepare a NS object. + regularNS = ns.DeepCopy() + regularNS.Name = nsName + regularNSJSON := marshalK8sObjJSON(regularNS) + + // Prepare a ConfigMap object. + regularConfigMap = configMap.DeepCopy() + regularConfigMap.Namespace = nsName + regularConfigMapJson := marshalK8sObjJSON(regularConfigMap) + + // Prepare a piece of malformed JSON data. + malformedConfigMap := configMap.DeepCopy() + malformedConfigMap.Namespace = nsName + malformedConfigMap.Name = "gibberish" + malformedConfigMap.TypeMeta = metav1.TypeMeta{ + Kind: "MalformedObj", + APIVersion: "v10", + } + malformedConfigMapJSON := marshalK8sObjJSON(malformedConfigMap) + + // Create a Work object with all the manifest JSONs. + createWorkObject(workName, memberReservedNSName1, nil, regularNSJSON, malformedConfigMapJSON, regularConfigMapJson) + }) + + It("should add cleanup finalizer to the Work object", func() { + finalizerAddedActual := workFinalizerAddedActual(workName) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add cleanup finalizer to the Work object") + }) + + It("should prepare an AppliedWork object", func() { + appliedWorkCreatedActual := appliedWorkCreatedActual(workName) + Eventually(appliedWorkCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to prepare an AppliedWork object") + + appliedWorkOwnerRef = prepareAppliedWorkOwnerRef(workName) + }) + + It("should apply some manifests", func() { + // Ensure that the NS object has been applied as expected. + regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) + Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") + + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + + // Ensure that the ConfigMap object has been applied as expected. 
+ regularConfigMapAppliedActual := regularConfigMapObjectAppliedActual(nsName, configMapName, appliedWorkOwnerRef) + Eventually(regularConfigMapAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the ConfigMap object") + + Expect(memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: configMapName}, regularConfigMap)).To(Succeed(), "Failed to retrieve the ConfigMap object") + }) + + It("should update the Work object status", func() { + // Prepare the status information. + workConds := []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: condition.WorkNotAllManifestsAppliedReason, + }, + } + manifestConds := []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "Namespace", + Resource: "namespaces", + Name: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeApplied), + ObservedGeneration: 0, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(AvailabilityResultTypeAvailable), + ObservedGeneration: 0, + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Kind: "MalformedObj", + Group: "", + Version: "v10", + Name: "gibberish", + Namespace: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: string(ApplyOrReportDiffResTypeDecodingErred), + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 2, + Group: "", + Version: "v1", + Kind: "ConfigMap", + Resource: "configmaps", + Name: configMapName, + Namespace: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeApplied), + ObservedGeneration: 0, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(AvailabilityResultTypeAvailable), + ObservedGeneration: 0, + }, + }, + }, + } + + workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") + Consistently(workStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Work status changed unexpectedly") + }) + + It("should update the AppliedWork object status", func() { + // Prepare the status information. 
+ appliedResourceMeta := []fleetv1beta1.AppliedResourceMeta{ + { + WorkResourceIdentifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "Namespace", + Resource: "namespaces", + Name: nsName, + }, + UID: regularNS.UID, + }, + { + WorkResourceIdentifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 2, + Group: "", + Version: "v1", + Kind: "ConfigMap", + Resource: "configmaps", + Name: configMapName, + Namespace: nsName, + }, + UID: regularConfigMap.UID, + }, + } + + appliedWorkStatusUpdatedActual := appliedWorkStatusUpdated(workName, appliedResourceMeta) + Eventually(appliedWorkStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update appliedWork status") + Consistently(appliedWorkStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "AppliedWork status changed unexpectedly") + }) + + AfterAll(func() { + // Delete the Work object and related resources. + deleteWorkObject(workName, memberReservedNSName1) + + // Ensure applied manifest has been removed. + regularConfigMapRemovedActual := regularConfigMapRemovedActual(nsName, configMapName) + Eventually(regularConfigMapRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the configMap object") + + // Kubebuilder suggests that in a testing environment like this, to check for the existence of the AppliedWork object + // OwnerReference in the Namespace object (https://book.kubebuilder.io/reference/envtest.html#testing-considerations). + checkNSOwnerReferences(workName, nsName) + + // Ensure that the AppliedWork object has been removed. + appliedWorkRemovedActual := appliedWorkRemovedActual(workName, nsName) + Eventually(appliedWorkRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the AppliedWork object") + + workRemovedActual := workRemovedActual(workName) + Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") + + // The environment prepared by the envtest package does not support namespace + // deletion; consequently this test suite would not attempt to verify its deletion. + }) + }) + + Context("object with generate name", Ordered, func() { + workName := fmt.Sprintf(workNameTemplate, utils.RandStr()) + // The environment prepared by the envtest package does not support namespace + // deletion; each test case would use a new namespace. + nsName := fmt.Sprintf(nsNameTemplate, utils.RandStr()) + + var appliedWorkOwnerRef *metav1.OwnerReference + var regularNS *corev1.Namespace + var regularConfigMap *corev1.ConfigMap + + BeforeAll(func() { + // Prepare a NS object. + regularNS = ns.DeepCopy() + regularNS.Name = nsName + regularNSJSON := marshalK8sObjJSON(regularNS) + + // Prepare a ConfigMap object. + regularConfigMap = configMap.DeepCopy() + regularConfigMap.GenerateName = "cm-" + regularConfigMap.Name = "" + regularConfigMap.Namespace = nsName + regularConfigMapJSON := marshalK8sObjJSON(regularConfigMap) + + // Create a Work object with all the manifest JSONs.
+ createWorkObject(workName, memberReservedNSName1, nil, regularNSJSON, regularConfigMapJSON) + }) + + It("should add cleanup finalizer to the Work object", func() { + finalizerAddedActual := workFinalizerAddedActual(workName) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add cleanup finalizer to the Work object") + }) + + It("should prepare an AppliedWork object", func() { + appliedWorkCreatedActual := appliedWorkCreatedActual(workName) + Eventually(appliedWorkCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to prepare an AppliedWork object") + + appliedWorkOwnerRef = prepareAppliedWorkOwnerRef(workName) + }) + + It("should apply some manifests", func() { + // Ensure that the NS object has been applied as expected. + regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) + Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") + + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + }) + + It("should update the Work object status", func() { + // Prepare the status information. + workConds := []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: condition.WorkNotAllManifestsAppliedReason, + }, + } + manifestConds := []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "Namespace", + Resource: "namespaces", + Name: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeApplied), + ObservedGeneration: 0, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(AvailabilityResultTypeAvailable), + ObservedGeneration: 0, + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Group: "", + Version: "v1", + Kind: "ConfigMap", + Resource: "configmaps", + Namespace: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: string(ApplyOrReportDiffResTypeFoundGenerateName), + ObservedGeneration: 0, + }, + }, + }, + } + + workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") + Consistently(workStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Work status changed unexpectedly") + }) + + It("should update the AppliedWork object status", func() { + // Prepare the status information. 
+ appliedResourceMeta := []fleetv1beta1.AppliedResourceMeta{ + { + WorkResourceIdentifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "Namespace", + Resource: "namespaces", + Name: nsName, + }, + UID: regularNS.UID, + }, + } + + appliedWorkStatusUpdatedActual := appliedWorkStatusUpdated(workName, appliedResourceMeta) + Eventually(appliedWorkStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update appliedWork status") + Consistently(appliedWorkStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "AppliedWork status changed unexpectedly") + }) + + AfterAll(func() { + // Delete the Work object and related resources. + deleteWorkObject(workName, memberReservedNSName1) + + // Kubebuilder suggests that in a testing environment like this, to check for the existence of the AppliedWork object + // OwnerReference in the Namespace object (https://book.kubebuilder.io/reference/envtest.html#testing-considerations). + checkNSOwnerReferences(workName, nsName) + + // Ensure that the AppliedWork object has been removed. + appliedWorkRemovedActual := appliedWorkRemovedActual(workName, nsName) + Eventually(appliedWorkRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the AppliedWork object") + + workRemovedActual := workRemovedActual(workName) + Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") + + // The environment prepared by the envtest package does not support namespace + // deletion; consequently this test suite would not attempt to verify its deletion. + }) + }) + + Context("duplicated manifests", Ordered, func() { + workName := fmt.Sprintf(workNameTemplate, utils.RandStr()) + // The environment prepared by the envtest package does not support namespace + // deletion; each test case would use a new namespace. + nsName := fmt.Sprintf(nsNameTemplate, utils.RandStr()) + + var appliedWorkOwnerRef *metav1.OwnerReference + var regularNS *corev1.Namespace + var regularConfigMap *corev1.ConfigMap + var duplicatedConfigMap *corev1.ConfigMap + + BeforeAll(func() { + // Prepare a NS object. + regularNS = ns.DeepCopy() + regularNS.Name = nsName + regularNSJSON := marshalK8sObjJSON(regularNS) + + // Prepare a ConfigMap object. + regularConfigMap = configMap.DeepCopy() + regularConfigMap.Namespace = nsName + regularConfigMapJSON := marshalK8sObjJSON(regularConfigMap) + + // Prepare a duplicated ConfigMap object; mutate its data before marshaling so that the duplicated manifest differs in content. + duplicatedConfigMap = configMap.DeepCopy() + duplicatedConfigMap.Namespace = nsName + duplicatedConfigMap.Data[dummyLabelKey] = dummyLabelValue2 + duplicatedConfigMapJSON := marshalK8sObjJSON(duplicatedConfigMap) + + // Create a Work object with all the manifest JSONs.
+ createWorkObject(workName, memberReservedNSName1, nil, regularNSJSON, regularConfigMapJSON, duplicatedConfigMapJSON) + }) + + It("should add cleanup finalizer to the Work object", func() { + finalizerAddedActual := workFinalizerAddedActual(workName) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add cleanup finalizer to the Work object") + }) + + It("should prepare an AppliedWork object", func() { + appliedWorkCreatedActual := appliedWorkCreatedActual(workName) + Eventually(appliedWorkCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to prepare an AppliedWork object") + + appliedWorkOwnerRef = prepareAppliedWorkOwnerRef(workName) + }) + + It("should apply some manifests", func() { + // Ensure that the NS object has been applied as expected. + regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) + Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") + + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + + // Ensure that the ConfigMap object has been applied as expected. + regularConfigMapAppliedActual := regularConfigMapObjectAppliedActual(nsName, configMapName, appliedWorkOwnerRef) + Eventually(regularConfigMapAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the ConfigMap object") + + Expect(memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: configMapName}, regularConfigMap)).To(Succeed(), "Failed to retrieve the ConfigMap object") + }) + + It("should update the Work object status", func() { + // Prepare the status information. 
+ workConds := []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: condition.WorkNotAllManifestsAppliedReason, + }, + } + manifestConds := []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "Namespace", + Resource: "namespaces", + Name: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeApplied), + ObservedGeneration: 0, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(AvailabilityResultTypeAvailable), + ObservedGeneration: 0, + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Kind: "ConfigMap", + Group: "", + Version: "v1", + Resource: "configmaps", + Name: configMapName, + Namespace: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeApplied), + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(AvailabilityResultTypeAvailable), + ObservedGeneration: 0, + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 2, + Group: "", + Version: "v1", + Kind: "ConfigMap", + Resource: "configmaps", + Name: configMapName, + Namespace: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: string(ApplyOrReportDiffResTypeDuplicated), + ObservedGeneration: 0, + }, + }, + }, + } + + workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") + Consistently(workStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Work status changed unexpectedly") + }) + + It("should update the AppliedWork object status", func() { + // Prepare the status information. + appliedResourceMeta := []fleetv1beta1.AppliedResourceMeta{ + { + WorkResourceIdentifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "Namespace", + Resource: "namespaces", + Name: nsName, + }, + UID: regularNS.UID, + }, + { + WorkResourceIdentifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Group: "", + Version: "v1", + Kind: "ConfigMap", + Resource: "configmaps", + Name: configMapName, + Namespace: nsName, + }, + UID: regularConfigMap.UID, + }, + } + + appliedWorkStatusUpdatedActual := appliedWorkStatusUpdated(workName, appliedResourceMeta) + Eventually(appliedWorkStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update appliedWork status") + Consistently(appliedWorkStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "AppliedWork status changed unexpectedly") + }) + }) + + Context("mixed (pre-processing)", Ordered, func() { + workName := fmt.Sprintf(workNameTemplate, utils.RandStr()) + // The environment prepared by the envtest package does not support namespace + // deletion; each test case would use a new namespace. 
+ nsName := fmt.Sprintf(nsNameTemplate, utils.RandStr()) + + var appliedWorkOwnerRef *metav1.OwnerReference + var regularNS *corev1.Namespace + var regularConfigMap *corev1.ConfigMap + var malformedConfigMap *corev1.ConfigMap + var configMapWithGenerateName *corev1.ConfigMap + var duplicatedConfigMap *corev1.ConfigMap + + BeforeAll(func() { + // Prepare a NS object. + regularNS = ns.DeepCopy() + regularNS.Name = nsName + regularNSJSON := marshalK8sObjJSON(regularNS) + + // Prepare a ConfigMap object. + regularConfigMap = configMap.DeepCopy() + regularConfigMap.Namespace = nsName + regularConfigMapJSON := marshalK8sObjJSON(regularConfigMap) + + // Prepare a piece of malformed JSON data. + malformedConfigMap = configMap.DeepCopy() + malformedConfigMap.Namespace = nsName + malformedConfigMap.Name = "gibberish" + malformedConfigMap.TypeMeta = metav1.TypeMeta{ + Kind: "MalformedObj", + APIVersion: "v10", + } + malformedConfigMapJSON := marshalK8sObjJSON(malformedConfigMap) + + // Prepare a ConfigMap object with generate name. + configMapWithGenerateName = configMap.DeepCopy() + configMapWithGenerateName.Name = "" + configMapWithGenerateName.Namespace = nsName + configMapWithGenerateName.GenerateName = "cm-" + configMapWithGenerateNameJSON := marshalK8sObjJSON(configMapWithGenerateName) + + // Prepare a duplicated ConfigMap object. + duplicatedConfigMap = configMap.DeepCopy() + duplicatedConfigMap.Namespace = nsName + duplicatedConfigMap.Data[dummyLabelKey] = dummyLabelValue2 + duplicatedConfigMapJSON := marshalK8sObjJSON(duplicatedConfigMap) + + // Create a Work object with all the manifest JSONs. + createWorkObject(workName, memberReservedNSName1, nil, regularNSJSON, regularConfigMapJSON, malformedConfigMapJSON, configMapWithGenerateNameJSON, duplicatedConfigMapJSON) + }) + + It("should add cleanup finalizer to the Work object", func() { + finalizerAddedActual := workFinalizerAddedActual(workName) + Eventually(finalizerAddedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to add cleanup finalizer to the Work object") + }) + + It("should prepare an AppliedWork object", func() { + appliedWorkCreatedActual := appliedWorkCreatedActual(workName) + Eventually(appliedWorkCreatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to prepare an AppliedWork object") + + appliedWorkOwnerRef = prepareAppliedWorkOwnerRef(workName) + }) + + It("should apply some manifests", func() { + // Ensure that the NS object has been applied as expected. + regularNSObjectAppliedActual := regularNSObjectAppliedActual(nsName, appliedWorkOwnerRef) + Eventually(regularNSObjectAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") + + Expect(memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, regularNS)).To(Succeed(), "Failed to retrieve the NS object") + + // Ensure that the ConfigMap object has been applied as expected. + regularConfigMapAppliedActual := regularConfigMapObjectAppliedActual(nsName, configMapName, appliedWorkOwnerRef) + Eventually(regularConfigMapAppliedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the ConfigMap object") + + Expect(memberClient1.Get(ctx, client.ObjectKey{Namespace: nsName, Name: configMapName}, regularConfigMap)).To(Succeed(), "Failed to retrieve the ConfigMap object") + }) + + It("should update the Work object status", func() { + // Prepare the status information. 
+ workConds := []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: condition.WorkNotAllManifestsAppliedReason, + }, + } + manifestConds := []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "Namespace", + Resource: "namespaces", + Name: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeApplied), + ObservedGeneration: 0, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(AvailabilityResultTypeAvailable), + ObservedGeneration: 0, + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Group: "", + Version: "v1", + Kind: "ConfigMap", + Resource: "configmaps", + Name: configMapName, + Namespace: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeApplied), + ObservedGeneration: 0, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(AvailabilityResultTypeAvailable), + ObservedGeneration: 0, + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 2, + Kind: "MalformedObj", + Group: "", + Version: "v10", + Name: "gibberish", + Namespace: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: string(ApplyOrReportDiffResTypeDecodingErred), + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 3, + Group: "", + Version: "v1", + Kind: "ConfigMap", + Resource: "configmaps", + Namespace: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: string(ApplyOrReportDiffResTypeFoundGenerateName), + ObservedGeneration: 0, + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 4, + Group: "", + Version: "v1", + Kind: "ConfigMap", + Resource: "configmaps", + Namespace: nsName, + Name: configMapName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: string(ApplyOrReportDiffResTypeDuplicated), + ObservedGeneration: 0, + }, + }, + }, + } + + workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") + Consistently(workStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Work status changed unexpectedly") + }) + + It("should update the AppliedWork object status", func() { + // Prepare the status information. 
+	appliedResourceMeta := []fleetv1beta1.AppliedResourceMeta{
+		{
+			WorkResourceIdentifier: fleetv1beta1.WorkResourceIdentifier{
+				Ordinal:  0,
+				Group:    "",
+				Version:  "v1",
+				Kind:     "Namespace",
+				Resource: "namespaces",
+				Name:     nsName,
+			},
+			UID: regularNS.UID,
+		},
+		{
+			WorkResourceIdentifier: fleetv1beta1.WorkResourceIdentifier{
+				Ordinal:   1,
+				Group:     "",
+				Version:   "v1",
+				Kind:      "ConfigMap",
+				Resource:  "configmaps",
+				Name:      configMapName,
+				Namespace: nsName,
+			},
+			UID: regularConfigMap.UID,
+		},
+	}
+
+	appliedWorkStatusUpdatedActual := appliedWorkStatusUpdated(workName, appliedResourceMeta)
+	Eventually(appliedWorkStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update appliedWork status")
+	Consistently(appliedWorkStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "AppliedWork status changed unexpectedly")
+	})
+
+	AfterAll(func() {
+	// Delete the Work object and related resources.
+	deleteWorkObject(workName, memberReservedNSName1)
+
+	// Ensure that the applied manifest has been removed.
+	regularConfigMapRemovedActual := regularConfigMapRemovedActual(nsName, configMapName)
+	Eventually(regularConfigMapRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the configMap object")
+
+	// Kubebuilder suggests that in a testing environment like this, one should check for the existence of the AppliedWork object
+	// OwnerReference in the Namespace object (https://book.kubebuilder.io/reference/envtest.html#testing-considerations).
+	checkNSOwnerReferences(workName, nsName)
+
+	// Ensure that the AppliedWork object has been removed.
+	appliedWorkRemovedActual := appliedWorkRemovedActual(workName, nsName)
+	Eventually(appliedWorkRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the AppliedWork object")
+
+	workRemovedActual := workRemovedActual(workName)
+	Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object")
+	// The environment prepared by the envtest package does not support namespace
+	// deletion; consequently this test suite would not attempt to verify its deletion.
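Editor's aside (illustrative sketch, not part of the patch): envtest does not run the namespace lifecycle controller, so a deleted namespace lingers in the Terminating phase rather than disappearing; this is why the cleanup above verifies owner references instead of waiting for the namespace to go away. Reusing names from this suite, a check of that behavior might look roughly like the following; treat it as a sketch only.

	leftoverNS := &corev1.Namespace{}
	// envtest never finishes terminating namespaces, so the object is expected to stay retrievable.
	Consistently(func() error {
		return memberClient1.Get(ctx, client.ObjectKey{Name: nsName}, leftoverNS)
	}, consistentlyDuration, consistentlyInterval).Should(Succeed(), "the namespace object is expected to linger in envtest")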
}) diff --git a/pkg/controllers/workapplier/drift_detection_takeover_test.go b/pkg/controllers/workapplier/drift_detection_takeover_test.go index 017633d65..4cdc4398d 100644 --- a/pkg/controllers/workapplier/drift_detection_takeover_test.go +++ b/pkg/controllers/workapplier/drift_detection_takeover_test.go @@ -129,7 +129,7 @@ func TestTakeOverPreExistingObject(t *testing.T) { workObj: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ Name: "dummy-work", - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, }, }, applyStrategy: &fleetv1beta1.ApplyStrategy{ @@ -206,7 +206,7 @@ func TestTakeOverPreExistingObject(t *testing.T) { r := &Reconciler{ hubClient: fakeHubClient, spokeDynamicClient: fakeMemberClient, - workNameSpace: memberReservedNSName, + workNameSpace: memberReservedNSName1, } takenOverObj, patchDetails, err := r.takeOverPreExistingObject( @@ -496,7 +496,7 @@ func TestRemoveLeftBehindAppliedWorkOwnerRefs(t *testing.T) { workObj: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ Name: workName, - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, }, }, }, @@ -512,7 +512,7 @@ func TestRemoveLeftBehindAppliedWorkOwnerRefs(t *testing.T) { r := &Reconciler{ hubClient: fakeHubClient, - workNameSpace: memberReservedNSName, + workNameSpace: memberReservedNSName1, } gotOwnerRefs, err := r.removeLeftBehindAppliedWorkOwnerRefs(ctx, tc.ownerRefs) diff --git a/pkg/controllers/workapplier/preprocess.go b/pkg/controllers/workapplier/preprocess.go index 7352ef8a9..611ed6012 100644 --- a/pkg/controllers/workapplier/preprocess.go +++ b/pkg/controllers/workapplier/preprocess.go @@ -77,6 +77,23 @@ func (r *Reconciler) preProcessManifests( bundle.manifestObj = manifestObj bundle.gvr = gvr + + // Add the string representation of the work resource identifier to the bundle. + // + // Note that the string representation ignores the ordinal number in the identifier. + wriStr, err := formatWRIString(bundle.id) + if err != nil { + // The work resource identifier cannot be formatted as a string. The formatting will + // fail if and only if the manifest data cannot be decoded (a failure in the previous + // step); normally this branch will never run as such error should have already terminated + // the pre-processing stage. + klog.ErrorS(controller.NewUnexpectedBehaviorError(fmt.Errorf("failed to format the work resource identifier as a string: %w", err)), + "Failed to format the work resource identifier as a string", + "ordinal", pieces, "work", klog.KObj(work)) + return + } + bundle.workResourceIdentifierStr = wriStr + klog.V(2).InfoS("Decoded a manifest", "manifestObj", klog.KObj(manifestObj), "GVR", *gvr, @@ -84,6 +101,14 @@ func (r *Reconciler) preProcessManifests( } r.parallelizer.ParallelizeUntil(childCtx, len(bundles), doWork, "decodingManifests") + // Check for duplicated manifests. + // + // This is to address a corner case where users might have specified the same manifest + // twice in resource envelopes (or both in the envelopes and directly in the hub cluster). + // + // Note that the CRP/RP APIs will block repeated resource selectors. + checkForDuplicatedManifests(bundles, work) + // Write ahead the manifest processing attempts in the Work object status. In the process // Fleet will also perform a cleanup to remove any left-over manifests that are applied // from previous runs. @@ -141,9 +166,6 @@ func (r *Reconciler) writeAheadManifestProcessingAttempts( // lookups. 
existingManifestCondQIdx := prepareExistingManifestCondQIdx(work.Status.ManifestConditions) - // For each manifest, verify if it has been tracked in the newly prepared manifest conditions. - // This helps signal duplicated resources in the Work object. - checked := make(map[string]bool, len(bundles)) for idx := range bundles { bundle := bundles[idx] if bundle.applyOrReportDiffErr != nil { @@ -153,45 +175,18 @@ func (r *Reconciler) writeAheadManifestProcessingAttempts( // Such manifests would still be reported in the status (see the later parts of the // reconciliation loop), it is just that they are not relevant in the write-ahead // process. - klog.V(2).InfoS("Skipped a manifest in the write-ahead process as it has failed pre-processing", "work", workRef, - "ordinal", idx, "applyErr", bundle.applyOrReportDiffErr, "applyResTyp", bundle.applyOrReportDiffResTyp) + klog.V(2).InfoS("Skipped a manifest in the write-ahead process as it has failed the decoding process or the duplicated manifest checking process", + "work", workRef, "ordinal", idx, + "applyErr", bundle.applyOrReportDiffErr, "applyResTyp", bundle.applyOrReportDiffResTyp) continue } - // Register the manifest in the checked map; if another manifest with the same identifier - // has been checked before, Fleet would mark the current manifest as a duplicate and skip - // it. This is to address a corner case where users might have specified the same manifest - // twice in resource envelopes; duplication will not occur if the manifests are directly - // created in the hub cluster. - // - // A side note: Golang does support using structs as map keys; preparing the string - // representations of structs as keys can help performance, though not by much. The reason - // why string representations are used here is not for performance, though; instead, it - // is to address the issue that for this comparison, ordinals should be ignored. - wriStr, err := formatWRIString(bundle.id) - if err != nil { - // Normally this branch will never run as all manifests that cannot be decoded has been - // skipped in the check above. Here Fleet simply skips the manifest. - klog.ErrorS(err, "Failed to format the work resource identifier string", - "ordinal", idx, "work", workRef) - _ = controller.NewUnexpectedBehaviorError(err) - continue - } - if _, found := checked[wriStr]; found { - klog.V(2).InfoS("A duplicate manifest has been found", - "ordinal", idx, "work", workRef, "workResourceID", wriStr) - bundle.applyOrReportDiffErr = fmt.Errorf("a duplicate manifest has been found") - bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeDuplicated - continue - } - checked[wriStr] = true - // Prepare the manifest conditions for the write-ahead process. 
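Editor's aside (illustrative sketch, not part of the patch): both the inline check being removed here and the new checkForDuplicatedManifests helper key duplicate detection purely off the formatted identifier string, so the ordinal never takes part in the comparison. Assuming the "GV=..., Kind=..., Namespace=..., Name=..." layout used by the test fixtures in this patch, the collision logic reduces to roughly the snippet below (keyFor and the values are hypothetical).

	// keyFor is a hypothetical helper mirroring the identifier string layout.
	keyFor := func(gv, kind, namespace, name string) string {
		return fmt.Sprintf("GV=%s, Kind=%s, Namespace=%s, Name=%s", gv, kind, namespace, name)
	}
	seen := map[string]bool{}
	for ordinal, key := range []string{
		keyFor("/v1", "ConfigMap", "app", "app-config"), // ordinal 0
		keyFor("apps/v1", "Deployment", "app", "app"),   // ordinal 1
		keyFor("/v1", "ConfigMap", "app", "app-config"), // ordinal 2: collides with ordinal 0
	} {
		if seen[key] {
			// The later bundle would be marked with ApplyOrReportDiffResTypeDuplicated.
			fmt.Printf("manifest at ordinal %d is a duplicate of an earlier manifest\n", ordinal)
			continue
		}
		seen[key] = true
	}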
-	manifestCondForWA := prepareManifestCondForWriteAhead(wriStr, bundle.id, work.Generation, existingManifestCondQIdx, work.Status.ManifestConditions)
+	manifestCondForWA := prepareManifestCondForWriteAhead(bundle.workResourceIdentifierStr, bundle.id, work.Generation, existingManifestCondQIdx, work.Status.ManifestConditions)
 	manifestCondsForWA = append(manifestCondsForWA, manifestCondForWA)
 	klog.V(2).InfoS("Prepared write-ahead information for a manifest",
-		"manifestObj", klog.KObj(bundle.manifestObj), "workResourceID", wriStr, "work", workRef)
+		"manifestObj", klog.KObj(bundle.manifestObj), "workResourceID", bundle.workResourceIdentifierStr, "work", workRef)
 	}
 
 	// Identify any manifests from previous runs that might have been applied and are now left
@@ -278,6 +273,37 @@ func buildWorkResourceIdentifier(
 	return identifier
 }
 
+// checkForDuplicatedManifests checks for duplicated manifests in the bundles.
+func checkForDuplicatedManifests(bundles []*manifestProcessingBundle, work *fleetv1beta1.Work) {
+	checked := make(map[string]bool, len(bundles))
+
+	for idx := range bundles {
+		bundle := bundles[idx]
+		if bundle.applyOrReportDiffErr != nil {
+			// Skip a manifest if it cannot be pre-processed, i.e., it can only be identified by
+			// its ordinal.
+			//
+			// Such manifests would still be reported in the status (see the later parts of the
+			// reconciliation loop), it is just that they are not relevant in the duplicated manifest
+			// checking process.
+			klog.V(2).InfoS("Skipped a manifest in the duplicated manifest checking process as it has failed in the decoding process",
+				"work", klog.KObj(work), "ordinal", idx,
+				"applyErr", bundle.applyOrReportDiffErr, "applyResTyp", bundle.applyOrReportDiffResTyp)
+			continue
+		}
+
+		if _, found := checked[bundle.workResourceIdentifierStr]; found {
+			klog.V(2).InfoS("A duplicate manifest has been found",
+				"ordinal", idx, "work", klog.KObj(work), "workResourceID", bundle.workResourceIdentifierStr)
+			bundle.applyOrReportDiffErr = fmt.Errorf("a duplicate manifest has been found")
+			bundle.applyOrReportDiffResTyp = ApplyOrReportDiffResTypeDuplicated
+			continue
+		}
+		checked[bundle.workResourceIdentifierStr] = true
+	}
+	klog.V(2).InfoS("Completed the duplicated manifest checking process", "work", klog.KObj(work))
+}
+
 // prepareExistingManifestCondQIdx returns a map that allows quicker look up of a manifest
 // condition given a work resource identifier.
 func prepareExistingManifestCondQIdx(existingManifestConditions []fleetv1beta1.ManifestCondition) map[string]int {
diff --git a/pkg/controllers/workapplier/preprocess_test.go b/pkg/controllers/workapplier/preprocess_test.go
index 4f229a52b..3e453d642 100644
--- a/pkg/controllers/workapplier/preprocess_test.go
+++ b/pkg/controllers/workapplier/preprocess_test.go
@@ -89,6 +89,100 @@ func TestBuildWorkResourceIdentifier(t *testing.T) {
 	}
 }
 
+// TestCheckForDuplicatedManifests tests the checkForDuplicatedManifests function.
+func TestCheckForDuplicatedManifests(t *testing.T) { + wriStr1 := fmt.Sprintf("GV=/v1, Kind=Namespace, Namespace=, Name=%s", nsName) + wriStr2 := fmt.Sprintf("GV=apps/v1, Kind=Deployment, Namespace=%s, Name=%s", nsName, deployName) + wriStr3 := fmt.Sprintf("GV=apps/v1, Kind=ConfigMap, Namespace=%s, Name=%s", nsName, configMapName) + + work := &fleetv1beta1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: workName, + Namespace: memberReservedNSName1, + }, + } + + testCases := []struct { + name string + bundles []*manifestProcessingBundle + wantBundles []*manifestProcessingBundle + }{ + { + name: "no duplicates", + bundles: []*manifestProcessingBundle{ + { + workResourceIdentifierStr: wriStr1, + }, + { + workResourceIdentifierStr: wriStr2, + }, + { + workResourceIdentifierStr: wriStr3, + }, + }, + wantBundles: []*manifestProcessingBundle{ + { + workResourceIdentifierStr: wriStr1, + }, + { + workResourceIdentifierStr: wriStr2, + }, + { + workResourceIdentifierStr: wriStr3, + }, + }, + }, + { + name: "with duplicates", + bundles: []*manifestProcessingBundle{ + { + workResourceIdentifierStr: wriStr1, + }, + { + workResourceIdentifierStr: wriStr2, + }, + { + workResourceIdentifierStr: wriStr1, + }, + }, + wantBundles: []*manifestProcessingBundle{ + { + workResourceIdentifierStr: wriStr1, + }, + { + workResourceIdentifierStr: wriStr2, + }, + { + workResourceIdentifierStr: wriStr1, + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeDuplicated, + applyOrReportDiffErr: fmt.Errorf("a duplicate manifest has been found"), + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + checkForDuplicatedManifests(tc.bundles, work) + if diff := cmp.Diff( + tc.bundles, tc.wantBundles, + cmp.AllowUnexported(manifestProcessingBundle{}), + cmp.Comparer(func(e1, e2 error) bool { + if e1 == nil && e2 == nil { + return true + } + if e1 != nil && e2 != nil { + return e1.Error() == e2.Error() + } + return false + }), + ); diff != "" { + t.Errorf("checkForDuplicatedManifests() results mismatch (-got +want):\n%s", diff) + } + }) + } +} + // TestRemoveLeftOverManifests tests the removeLeftOverManifests method. func TestRemoveLeftOverManifests(t *testing.T) { ctx := context.Background() diff --git a/pkg/controllers/workapplier/status.go b/pkg/controllers/workapplier/status.go index d095c21d7..53145f11b 100644 --- a/pkg/controllers/workapplier/status.go +++ b/pkg/controllers/workapplier/status.go @@ -620,8 +620,7 @@ func prepareRebuiltManifestCondQIdx(bundles []*manifestProcessingBundle) map[str for idx := range bundles { bundle := bundles[idx] - wriStr, err := formatWRIString(bundle.id) - if err != nil { + if len(bundle.workResourceIdentifierStr) == 0 { // There might be manifest conditions without a valid identifier in the bundle set // (e.g., decoding error has occurred when processing a bundle). // Fleet will skip these bundles, as there is no need to port back @@ -629,8 +628,7 @@ func prepareRebuiltManifestCondQIdx(bundles []*manifestProcessingBundle) map[str // identifiable). This is not considered as an error. 
continue } - - rebuiltManifestCondQIdx[wriStr] = idx + rebuiltManifestCondQIdx[bundle.workResourceIdentifierStr] = idx } return rebuiltManifestCondQIdx } diff --git a/pkg/controllers/workapplier/status_test.go b/pkg/controllers/workapplier/status_test.go index d9555927b..a8d9f3a96 100644 --- a/pkg/controllers/workapplier/status_test.go +++ b/pkg/controllers/workapplier/status_test.go @@ -52,7 +52,7 @@ func TestRefreshWorkStatus(t *testing.T) { workNS := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: memberReservedNSName, + Name: memberReservedNSName1, }, } @@ -79,7 +79,7 @@ func TestRefreshWorkStatus(t *testing.T) { work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ Name: workName, - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Generation: 1, }, }, @@ -148,7 +148,7 @@ func TestRefreshWorkStatus(t *testing.T) { work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ Name: workName, - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Generation: 2, }, }, @@ -236,7 +236,7 @@ func TestRefreshWorkStatus(t *testing.T) { work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ Name: workName, - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, }, }, bundles: []*manifestProcessingBundle{ @@ -374,7 +374,7 @@ func TestRefreshWorkStatus(t *testing.T) { work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ Name: workName, - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Generation: 2, }, Status: fleetv1beta1.WorkStatus{ @@ -444,9 +444,10 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), - applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFoundDrifts, - availabilityResTyp: AvailabilityResultTypeSkipped, + workResourceIdentifierStr: fmt.Sprintf("GV=apps/v1, Kind=Deployment, Namespace=%s, Name=%s", nsName, deployName), + inMemberClusterObj: toUnstructured(t, deploy.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFoundDrifts, + availabilityResTyp: AvailabilityResultTypeSkipped, drifts: []fleetv1beta1.PatchDetail{ { Path: "/spec/replicas", @@ -464,9 +465,10 @@ func TestRefreshWorkStatus(t *testing.T) { Namespace: nsName, Resource: "deployments", }, - inMemberClusterObj: toUnstructured(t, deploy2.DeepCopy()), - applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToTakeOver, - availabilityResTyp: AvailabilityResultTypeSkipped, + workResourceIdentifierStr: fmt.Sprintf("GV=apps/v1, Kind=Deployment, Namespace=%s, Name=%s", nsName, deployName2), + inMemberClusterObj: toUnstructured(t, deploy2.DeepCopy()), + applyOrReportDiffResTyp: ApplyOrReportDiffResTypeFailedToTakeOver, + availabilityResTyp: AvailabilityResultTypeSkipped, diffs: []fleetv1beta1.PatchDetail{ { Path: "/spec/replicas", @@ -550,7 +552,7 @@ func TestRefreshWorkStatus(t *testing.T) { work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ Name: workName, - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Generation: 2, }, Spec: fleetv1beta1.WorkSpec{ @@ -660,7 +662,7 @@ func TestRefreshWorkStatus(t *testing.T) { work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ Name: workName, - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Generation: 2, }, Spec: fleetv1beta1.WorkSpec{ @@ -758,7 +760,7 @@ func TestRefreshWorkStatus(t *testing.T) { work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ Name: workName, - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, 
Generation: 2, }, Spec: fleetv1beta1.WorkSpec{ @@ -921,7 +923,7 @@ func TestRefreshWorkStatus(t *testing.T) { work: &fleetv1beta1.Work{ ObjectMeta: metav1.ObjectMeta{ Name: workName, - Namespace: memberReservedNSName, + Namespace: memberReservedNSName1, Generation: 2, }, Spec: fleetv1beta1.WorkSpec{ @@ -1074,7 +1076,7 @@ func TestRefreshWorkStatus(t *testing.T) { Build() r := &Reconciler{ hubClient: fakeClient, - workNameSpace: memberReservedNSName, + workNameSpace: memberReservedNSName1, } err := r.refreshWorkStatus(ctx, tc.work, tc.bundles) @@ -1083,7 +1085,7 @@ func TestRefreshWorkStatus(t *testing.T) { } updatedWork := &fleetv1beta1.Work{} - if err := fakeClient.Get(ctx, types.NamespacedName{Namespace: memberReservedNSName, Name: workName}, updatedWork); err != nil { + if err := fakeClient.Get(ctx, types.NamespacedName{Namespace: memberReservedNSName1, Name: workName}, updatedWork); err != nil { t.Fatalf("Work Get() = %v, want no error", err) } opts := []cmp.Option{ diff --git a/pkg/controllers/workapplier/suite_test.go b/pkg/controllers/workapplier/suite_test.go index 40be7a437..c61e0d82a 100644 --- a/pkg/controllers/workapplier/suite_test.go +++ b/pkg/controllers/workapplier/suite_test.go @@ -20,6 +20,7 @@ import ( "context" "flag" "path/filepath" + "sync" "testing" "time" @@ -33,12 +34,15 @@ import ( "k8s.io/klog/v2" "k8s.io/klog/v2/textlogger" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" + ctrloption "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/metrics/server" + "sigs.k8s.io/controller-runtime/pkg/predicate" fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" testv1alpha1 "github.com/kubefleet-dev/kubefleet/test/apis/v1alpha1" @@ -47,18 +51,27 @@ import ( // These tests use Ginkgo (BDD-style Go testing framework). Refer to // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. var ( - hubCfg *rest.Config - memberCfg *rest.Config - hubEnv *envtest.Environment - memberEnv *envtest.Environment - hubMgr manager.Manager - hubClient client.Client - memberClient client.Client - memberDynamicClient dynamic.Interface - workApplier *Reconciler + hubCfg *rest.Config + hubEnv *envtest.Environment + hubClient client.Client + + memberCfg1 *rest.Config + memberEnv1 *envtest.Environment + hubMgr1 manager.Manager + memberClient1 client.Client + memberDynamicClient1 dynamic.Interface + workApplier1 *Reconciler + + memberCfg2 *rest.Config + memberEnv2 *envtest.Environment + hubMgr2 manager.Manager + memberClient2 client.Client + memberDynamicClient2 dynamic.Interface + workApplier2 *Reconciler ctx context.Context cancel context.CancelFunc + wg sync.WaitGroup ) const ( @@ -67,7 +80,8 @@ const ( // The count of workers for the work applier controller. 
workerCount = 4 - memberReservedNSName = "fleet-member-experimental" + memberReservedNSName1 = "fleet-member-experimental-1" + memberReservedNSName2 = "fleet-member-experimental-2" ) func TestAPIs(t *testing.T) { @@ -77,12 +91,19 @@ func TestAPIs(t *testing.T) { } func setupResources() { - ns := &corev1.Namespace{ + ns1 := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: memberReservedNSName1, + }, + } + Expect(hubClient.Create(ctx, ns1)).To(Succeed()) + + ns2 := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: memberReservedNSName, + Name: memberReservedNSName2, }, } - Expect(hubClient.Create(ctx, ns)).To(Succeed()) + Expect(hubClient.Create(ctx, ns2)).To(Succeed()) } var _ = BeforeSuite(func() { @@ -102,7 +123,20 @@ var _ = BeforeSuite(func() { filepath.Join("../../../", "test", "manifests"), }, } - memberEnv = &envtest.Environment{ + // memberEnv1 is the test environment for verifying most work applier behaviors. + memberEnv1 = &envtest.Environment{ + CRDDirectoryPaths: []string{ + filepath.Join("../../../", "config", "crd", "bases"), + filepath.Join("../../../", "test", "manifests"), + }, + } + // memberEnv2 is the test environment for verifying the correctness of exponential backoff as + // enabled in the work applier. + // + // Note (chenyu1): to avoid flakiness, a separate test environment with a work applier of special + // exponential backoff setting is used so that the backoffs can be identified in a more + // apparent and controllable manner. + memberEnv2 = &envtest.Environment{ CRDDirectoryPaths: []string{ filepath.Join("../../../", "config", "crd", "bases"), filepath.Join("../../../", "test", "manifests"), @@ -114,9 +148,13 @@ var _ = BeforeSuite(func() { Expect(err).ToNot(HaveOccurred()) Expect(hubCfg).ToNot(BeNil()) - memberCfg, err = memberEnv.Start() + memberCfg1, err = memberEnv1.Start() Expect(err).ToNot(HaveOccurred()) - Expect(memberCfg).ToNot(BeNil()) + Expect(memberCfg1).ToNot(BeNil()) + + memberCfg2, err = memberEnv2.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(memberCfg2).ToNot(BeNil()) err = fleetv1beta1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) @@ -128,52 +166,125 @@ var _ = BeforeSuite(func() { Expect(err).ToNot(HaveOccurred()) Expect(hubClient).ToNot(BeNil()) - memberClient, err = client.New(memberCfg, client.Options{Scheme: scheme.Scheme}) + memberClient1, err = client.New(memberCfg1, client.Options{Scheme: scheme.Scheme}) + Expect(err).ToNot(HaveOccurred()) + Expect(memberClient1).ToNot(BeNil()) + + memberClient2, err = client.New(memberCfg2, client.Options{Scheme: scheme.Scheme}) Expect(err).ToNot(HaveOccurred()) - Expect(memberClient).ToNot(BeNil()) + Expect(memberClient2).ToNot(BeNil()) // This setup also requires a client-go dynamic client for the member cluster. 
- memberDynamicClient, err = dynamic.NewForConfig(memberCfg) + memberDynamicClient1, err = dynamic.NewForConfig(memberCfg1) + Expect(err).ToNot(HaveOccurred()) + + memberDynamicClient2, err = dynamic.NewForConfig(memberCfg2) Expect(err).ToNot(HaveOccurred()) By("Setting up the resources") setupResources() - By("Setting up the controller and the controller manager") - hubMgr, err = ctrl.NewManager(hubCfg, ctrl.Options{ + By("Setting up the controller and the controller manager for member cluster 1") + hubMgr1, err = ctrl.NewManager(hubCfg, ctrl.Options{ + Scheme: scheme.Scheme, + Metrics: server.Options{ + BindAddress: "0", + }, + Cache: cache.Options{ + DefaultNamespaces: map[string]cache.Config{ + memberReservedNSName1: {}, + }, + }, + Logger: textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(4))), + }) + Expect(err).ToNot(HaveOccurred()) + + workApplier1 = NewReconciler( + hubClient, + memberReservedNSName1, + memberDynamicClient1, + memberClient1, + memberClient1.RESTMapper(), + hubMgr1.GetEventRecorderFor("work-applier"), + maxConcurrentReconciles, + workerCount, + 30*time.Second, + true, + 60, + nil, // Use the default backoff rate limiter. + ) + Expect(workApplier1.SetupWithManager(hubMgr1)).To(Succeed()) + + By("Setting up the controller and the controller manager for member cluster 2") + hubMgr2, err = ctrl.NewManager(hubCfg, ctrl.Options{ Scheme: scheme.Scheme, Metrics: server.Options{ BindAddress: "0", }, Cache: cache.Options{ DefaultNamespaces: map[string]cache.Config{ - memberReservedNSName: {}, + memberReservedNSName2: {}, }, }, Logger: textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(4))), }) Expect(err).ToNot(HaveOccurred()) - workApplier = NewReconciler( + superLongExponentialBackoffRateLimiter := NewRequeueMultiStageWithExponentialBackoffRateLimiter( + // Allow one attempt of backoff with fixed delay. + 1, + // Use a fixed delay of 10 seconds. + 10, + // Set the exponential backoff base factor to 1.5 for the slow backoff stage. + 1.5, + // Set the initial slow backoff delay to 20 seconds. + 20, + // Set the maximum slow backoff delay to 30 seconds (allow 2 slow backoffs). + 30, + // Set the exponential backoff base factor to 2 for the fast backoff stage. + 2, + // Set the maximum fast backoff delay to 90 seconds (allow 1 fast backoff). + 90, + // Allow skipping to fast backoff stage. + true, + ) + workApplier2 = NewReconciler( hubClient, - memberReservedNSName, - memberDynamicClient, - memberClient, - memberClient.RESTMapper(), - hubMgr.GetEventRecorderFor("work-applier"), + memberReservedNSName2, + memberDynamicClient2, + memberClient2, + memberClient2.RESTMapper(), + hubMgr2.GetEventRecorderFor("work-applier"), maxConcurrentReconciles, workerCount, 30*time.Second, true, 60, - defaultRequeueRateLimiter, + superLongExponentialBackoffRateLimiter, ) - Expect(workApplier.SetupWithManager(hubMgr)).To(Succeed()) + // Due to name conflicts, the second work applier must be set up manually. + err = ctrl.NewControllerManagedBy(hubMgr2).Named("work-applier-controller-duplicate"). + WithOptions(ctrloption.Options{ + MaxConcurrentReconciles: workApplier2.concurrentReconciles, + }). + For(&fleetv1beta1.Work{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). 
+ Complete(workApplier2) + Expect(err).NotTo(HaveOccurred()) + + wg = sync.WaitGroup{} + wg.Add(2) + go func() { + defer GinkgoRecover() + defer wg.Done() + Expect(workApplier1.Join(ctx)).To(Succeed()) + Expect(hubMgr1.Start(ctx)).To(Succeed()) + }() go func() { defer GinkgoRecover() - Expect(workApplier.Join(ctx)).To(Succeed()) - Expect(hubMgr.Start(ctx)).To(Succeed()) + defer wg.Done() + Expect(workApplier2.Join(ctx)).To(Succeed()) + Expect(hubMgr2.Start(ctx)).To(Succeed()) }() }) @@ -181,7 +292,9 @@ var _ = AfterSuite(func() { defer klog.Flush() cancel() + wg.Wait() By("Tearing down the test environment") Expect(hubEnv.Stop()).To(Succeed()) - Expect(memberEnv.Stop()).To(Succeed()) + Expect(memberEnv1.Stop()).To(Succeed()) + Expect(memberEnv2.Stop()).To(Succeed()) }) diff --git a/test/e2e/placement_negative_cases_test.go b/test/e2e/placement_negative_cases_test.go index 389758bef..92ca627b9 100644 --- a/test/e2e/placement_negative_cases_test.go +++ b/test/e2e/placement_negative_cases_test.go @@ -32,11 +32,12 @@ var _ = Describe("handling errors and failures gracefully", func() { cmDataVal1 := "bar" cmDataVal2 := "baz" - // This test spec uses envelopes for placement as it is a bit tricky to simulate + // Many test specs below use envelopes for placement as it is a bit tricky to simulate // decoding errors with resources created directly in the hub cluster. // // TO-DO (chenyu1): reserve an API group exclusively on the hub cluster so that // envelopes do not need to be used for this test spec. + Context("pre-processing failure in apply ops (decoding errors)", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) @@ -69,8 +70,7 @@ var _ = Describe("handling errors and failures gracefully", func() { cmDataKey: cmDataVal1, }, } - // Given Fleet's current resource sorting logic, this configMap - // will be considered as the duplicated resource entry. + // Prepare a malformed config map. badConfigMap := configMap.DeepCopy() badConfigMap.TypeMeta = metav1.TypeMeta{ APIVersion: "dummy/v10", @@ -80,6 +80,7 @@ var _ = Describe("handling errors and failures gracefully", func() { Expect(err).To(BeNil(), "Failed to marshal configMap %s", badConfigMap.Name) resourceEnvelope.Data["cm1.yaml"] = runtime.RawExtension{Raw: badCMBytes} + // Prepare a regular config map. wrappedCM2 := configMap.DeepCopy() wrappedCM2.Name = wrappedCMName2 wrappedCM2.Data[cmDataKey] = cmDataVal2 @@ -87,7 +88,7 @@ var _ = Describe("handling errors and failures gracefully", func() { Expect(err).To(BeNil(), "Failed to marshal configMap %s", wrappedCM2.Name) resourceEnvelope.Data["cm2.yaml"] = runtime.RawExtension{Raw: wrappedCM2Bytes} - Expect(hubClient.Create(ctx, resourceEnvelope)).To(Succeed(), "Failed to create configMap %s", resourceEnvelope.Name) + Expect(hubClient.Create(ctx, resourceEnvelope)).To(Succeed(), "Failed to create resource envelope %s", resourceEnvelope.Name) // Create a CRP. 
crp := &placementv1beta1.ClusterResourcePlacement{ @@ -254,7 +255,7 @@ var _ = Describe("handling errors and failures gracefully", func() { badCMBytes, err := json.Marshal(badConfigMap) Expect(err).To(BeNil(), "Failed to marshal configMap %s", badConfigMap.Name) resourceEnvelope.Data["cm1.yaml"] = runtime.RawExtension{Raw: badCMBytes} - Expect(hubClient.Create(ctx, resourceEnvelope)).To(Succeed(), "Failed to create configMap %s", resourceEnvelope.Name) + Expect(hubClient.Create(ctx, resourceEnvelope)).To(Succeed(), "Failed to create resource envelope %s", resourceEnvelope.Name) // Create a CRP. crp := &placementv1beta1.ClusterResourcePlacement{ @@ -342,4 +343,187 @@ var _ = Describe("handling errors and failures gracefully", func() { ensureCRPAndRelatedResourcesDeleted(crpName, []*framework.Cluster{memberCluster1EastProd}) }) }) + + Context("pre-processing failure in apply ops (duplicated)", Ordered, func() { + crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) + workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) + + BeforeAll(func() { + // Use an envelope to create duplicate resource entries. + ns := appNamespace() + Expect(hubClient.Create(ctx, &ns)).To(Succeed(), "Failed to create namespace %s", ns.Name) + + // Create an envelope resource to wrap the configMaps. + resourceEnvelope := &placementv1beta1.ResourceEnvelope{ + ObjectMeta: metav1.ObjectMeta{ + Name: envelopeName, + Namespace: ns.Name, + }, + Data: map[string]runtime.RawExtension{}, + } + + // Create configMaps as wrapped resources. + configMap := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1.SchemeGroupVersion.String(), + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns.Name, + Name: wrappedCMName1, + }, + Data: map[string]string{ + cmDataKey: cmDataVal1, + }, + } + // Prepare a regular config map and a duplicate. + wrappedCM := configMap.DeepCopy() + wrappedCM.Name = wrappedCMName1 + wrappedCM.Data[cmDataKey] = cmDataVal1 + wrappedCMBytes, err := json.Marshal(wrappedCM) + Expect(err).To(BeNil(), "Failed to marshal configMap %s", wrappedCM.Name) + resourceEnvelope.Data["cm1.yaml"] = runtime.RawExtension{Raw: wrappedCMBytes} + + // Note: due to how work generator sorts manifests, the CM below will actually be + // applied first. + duplicatedCM := configMap.DeepCopy() + duplicatedCM.Name = wrappedCMName1 + duplicatedCM.Data[cmDataKey] = cmDataVal2 + duplicatedCMBytes, err := json.Marshal(duplicatedCM) + Expect(err).To(BeNil(), "Failed to marshal configMap %s", duplicatedCM.Name) + resourceEnvelope.Data["cm2.yaml"] = runtime.RawExtension{Raw: duplicatedCMBytes} + + Expect(hubClient.Create(ctx, resourceEnvelope)).To(Succeed(), "Failed to create resource envelope %s", resourceEnvelope.Name) + + // Create a CRP. + crp := &placementv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpName, + // Add a custom finalizer; this would allow us to better observe + // the behavior of the controllers. 
+ Finalizers: []string{customDeletionBlockerFinalizer}, + }, + Spec: placementv1beta1.PlacementSpec{ + ResourceSelectors: workResourceSelector(), + Policy: &placementv1beta1.PlacementPolicy{ + PlacementType: placementv1beta1.PickFixedPlacementType, + ClusterNames: []string{ + memberCluster1EastProdName, + }, + }, + Strategy: placementv1beta1.RolloutStrategy{ + Type: placementv1beta1.RollingUpdateRolloutStrategyType, + RollingUpdate: &placementv1beta1.RollingUpdateConfig{ + UnavailablePeriodSeconds: ptr.To(2), + }, + }, + }, + } + Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP") + }) + + It("should update CRP status as expected", func() { + crpStatusUpdatedActual := func() error { + crp := &placementv1beta1.ClusterResourcePlacement{} + if err := hubClient.Get(ctx, types.NamespacedName{Name: crpName}, crp); err != nil { + return err + } + + wantStatus := placementv1beta1.PlacementStatus{ + Conditions: crpAppliedFailedConditions(crp.Generation), + PerClusterPlacementStatuses: []placementv1beta1.PerClusterPlacementStatus{ + { + ClusterName: memberCluster1EastProdName, + ObservedResourceIndex: "0", + FailedPlacements: []placementv1beta1.FailedResourcePlacement{ + { + ResourceIdentifier: placementv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "ConfigMap", + Namespace: workNamespaceName, + Name: wrappedCMName1, + Envelope: &placementv1beta1.EnvelopeIdentifier{ + Name: envelopeName, + Namespace: workNamespaceName, + Type: placementv1beta1.ResourceEnvelopeType, + }, + }, + Condition: metav1.Condition{ + Type: placementv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + Reason: string(workapplier.ApplyOrReportDiffResTypeDuplicated), + ObservedGeneration: 0, + }, + }, + }, + Conditions: perClusterApplyFailedConditions(crp.Generation), + }, + }, + SelectedResources: []placementv1beta1.ResourceIdentifier{ + { + Kind: "Namespace", + Name: workNamespaceName, + Version: "v1", + }, + { + Group: placementv1beta1.GroupVersion.Group, + Kind: placementv1beta1.ResourceEnvelopeKind, + Version: placementv1beta1.GroupVersion.Version, + Name: envelopeName, + Namespace: workNamespaceName, + }, + }, + ObservedResourceIndex: "0", + } + if diff := cmp.Diff(crp.Status, wantStatus, placementStatusCmpOptions...); diff != "" { + return fmt.Errorf("CRP status diff (-got, +want): %s", diff) + } + return nil + } + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update CRP status as expected") + Consistently(crpStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "CRP status has changed unexpectedly") + }) + + It("should place some manifests on member clusters", func() { + Eventually(func() error { + return validateWorkNamespaceOnCluster(memberCluster1EastProd, types.NamespacedName{Name: workNamespaceName}) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the namespace object") + + Eventually(func() error { + cm := &corev1.ConfigMap{} + if err := memberCluster1EastProdClient.Get(ctx, types.NamespacedName{Name: wrappedCMName1, Namespace: workNamespaceName}, cm); err != nil { + return err + } + + wantCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: wrappedCMName1, + Namespace: workNamespaceName, + }, + Data: map[string]string{ + cmDataKey: cmDataVal2, + }, + } + // Rebuild the configMap for ease of comparison. 
+ rebuiltGotCM := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: cm.Name, + Namespace: cm.Namespace, + }, + Data: cm.Data, + } + + if diff := cmp.Diff(rebuiltGotCM, wantCM); diff != "" { + return fmt.Errorf("configMap diff (-got, +want): %s", diff) + } + return nil + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to apply the configMap object #1") + }) + + AfterAll(func() { + // Remove the CRP and the namespace from the hub cluster. + ensureCRPAndRelatedResourcesDeleted(crpName, []*framework.Cluster{memberCluster1EastProd}) + }) + }) }) From 32a30692f3e93996144045d55797f95c3cb4766c Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Wed, 10 Sep 2025 03:22:06 +1000 Subject: [PATCH 2/3] Added additional integration tests Signed-off-by: michaelawyu --- cmd/memberagent/main.go | 2 +- .../v1beta1/member_suite_test.go | 4 +- pkg/controllers/workapplier/controller.go | 13 +- .../controller_integration_test.go | 86 +- pkg/controllers/workapplier/process.go | 2 +- pkg/controllers/workapplier/suite_test.go | 120 +- .../workapplier/waves_integration_test.go | 1218 +++++++++++++++++ pkg/scheduler/framework/framework.go | 2 +- pkg/utils/parallelizer/parallelizer.go | 16 +- 9 files changed, 1404 insertions(+), 59 deletions(-) create mode 100644 pkg/controllers/workapplier/waves_integration_test.go diff --git a/cmd/memberagent/main.go b/cmd/memberagent/main.go index 3d560a18b..70d6efe8c 100644 --- a/cmd/memberagent/main.go +++ b/cmd/memberagent/main.go @@ -439,7 +439,7 @@ func Start(ctx context.Context, hubCfg, memberConfig *rest.Config, hubOpts, memb // resource processing. 5, // Use the default worker count (4) for parallelized manifest processing. - parallelizer.DefaultNumOfWorkers, + parallelizer.NewParallelizer(parallelizer.DefaultNumOfWorkers), time.Minute*time.Duration(*deletionWaitTime), *watchWorkWithPriorityQueue, *watchWorkReconcileAgeMinutes, diff --git a/pkg/controllers/internalmembercluster/v1beta1/member_suite_test.go b/pkg/controllers/internalmembercluster/v1beta1/member_suite_test.go index 6720e2f35..6176e4926 100644 --- a/pkg/controllers/internalmembercluster/v1beta1/member_suite_test.go +++ b/pkg/controllers/internalmembercluster/v1beta1/member_suite_test.go @@ -379,7 +379,7 @@ var _ = BeforeSuite(func() { // This controller is created for testing purposes only; no reconciliation loop is actually // run. - workApplier1 = workapplier.NewReconciler(hubClient, member1ReservedNSName, nil, nil, nil, nil, 0, 1, time.Minute, false, 60, nil) + workApplier1 = workapplier.NewReconciler(hubClient, member1ReservedNSName, nil, nil, nil, nil, 0, nil, time.Minute, false, 60, nil) propertyProvider1 = &manuallyUpdatedProvider{} member1Reconciler, err := NewReconciler(ctx, hubClient, member1Cfg, member1Client, workApplier1, propertyProvider1) @@ -402,7 +402,7 @@ var _ = BeforeSuite(func() { // This controller is created for testing purposes only; no reconciliation loop is actually // run. 
- workApplier2 = workapplier.NewReconciler(hubClient, member2ReservedNSName, nil, nil, nil, nil, 0, 1, time.Minute, false, 60, nil) + workApplier2 = workapplier.NewReconciler(hubClient, member2ReservedNSName, nil, nil, nil, nil, 0, nil, time.Minute, false, 60, nil) member2Reconciler, err := NewReconciler(ctx, hubClient, member2Cfg, member2Client, workApplier2, nil) Expect(err).NotTo(HaveOccurred()) diff --git a/pkg/controllers/workapplier/controller.go b/pkg/controllers/workapplier/controller.go index 2aa30bd21..eff2bc942 100644 --- a/pkg/controllers/workapplier/controller.go +++ b/pkg/controllers/workapplier/controller.go @@ -49,7 +49,7 @@ import ( "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" "github.com/kubefleet-dev/kubefleet/pkg/utils/controller" "github.com/kubefleet-dev/kubefleet/pkg/utils/defaulter" - "github.com/kubefleet-dev/kubefleet/pkg/utils/parallelizer" + parallelizerutil "github.com/kubefleet-dev/kubefleet/pkg/utils/parallelizer" ) const ( @@ -227,7 +227,7 @@ type Reconciler struct { watchWorkReconcileAgeMinutes int deletionWaitTime time.Duration joined *atomic.Bool - parallelizer *parallelizer.Parallerlizer + parallelizer parallelizerutil.Parallelizer requeueRateLimiter *RequeueMultiStageWithExponentialBackoffRateLimiter } @@ -237,15 +237,20 @@ func NewReconciler( spokeDynamicClient dynamic.Interface, spokeClient client.Client, restMapper meta.RESTMapper, recorder record.EventRecorder, concurrentReconciles int, - workerCount int, + parallelizer parallelizerutil.Parallelizer, deletionWaitTime time.Duration, watchWorkWithPriorityQueue bool, watchWorkReconcileAgeMinutes int, requeueRateLimiter *RequeueMultiStageWithExponentialBackoffRateLimiter, ) *Reconciler { if requeueRateLimiter == nil { + klog.V(2).InfoS("requeue rate limiter is not set; using the default rate limiter") requeueRateLimiter = defaultRequeueRateLimiter } + if parallelizer == nil { + klog.V(2).InfoS("parallelizer is not set; using the default parallelizer with a worker count of 1") + parallelizer = parallelizerutil.NewParallelizer(1) + } return &Reconciler{ hubClient: hubClient, @@ -254,7 +259,7 @@ func NewReconciler( restMapper: restMapper, recorder: recorder, concurrentReconciles: concurrentReconciles, - parallelizer: parallelizer.NewParallelizer(workerCount), + parallelizer: parallelizer, watchWorkWithPriorityQueue: watchWorkWithPriorityQueue, watchWorkReconcileAgeMinutes: watchWorkReconcileAgeMinutes, workNameSpace: workNameSpace, diff --git a/pkg/controllers/workapplier/controller_integration_test.go b/pkg/controllers/workapplier/controller_integration_test.go index e76537c39..f86170db9 100644 --- a/pkg/controllers/workapplier/controller_integration_test.go +++ b/pkg/controllers/workapplier/controller_integration_test.go @@ -72,6 +72,7 @@ var ( dummyLabelValue2 = "baz" dummyLabelValue3 = "quz" dummyLabelValue4 = "qux" + dummyLabelValue5 = "quux" ) // createWorkObject creates a new Work object with the given name, manifests, and apply strategy. @@ -381,6 +382,7 @@ func markDeploymentAsAvailable(nsName, deployName string) { } func workStatusUpdated( + memberReservedNSName string, workName string, workConds []metav1.Condition, manifestConds []fleetv1beta1.ManifestCondition, @@ -390,7 +392,7 @@ func workStatusUpdated( return func() error { // Retrieve the Work object. 
work := &fleetv1beta1.Work{} - if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName1}, work); err != nil { + if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName}, work); err != nil { return fmt.Errorf("failed to retrieve the Work object: %w", err) } @@ -752,7 +754,7 @@ var _ = Describe("applying manifests", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -934,7 +936,7 @@ var _ = Describe("applying manifests", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -1027,7 +1029,7 @@ var _ = Describe("applying manifests", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -1224,7 +1226,7 @@ var _ = Describe("applying manifests", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -1404,7 +1406,7 @@ var _ = Describe("applying manifests", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -1579,7 +1581,7 @@ var _ = Describe("applying manifests", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -1749,7 +1751,7 @@ var _ = Describe("work applier garbage collection", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -2044,7 +2046,7 @@ var _ = Describe("work applier garbage collection", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) 
Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -2363,7 +2365,7 @@ var _ = Describe("work applier garbage collection", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -2654,7 +2656,7 @@ var _ = Describe("drift detection and takeover", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -2936,7 +2938,7 @@ var _ = Describe("drift detection and takeover", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, &noLaterThanTimestamp, &noLaterThanTimestamp) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, &noLaterThanTimestamp, &noLaterThanTimestamp) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -3228,7 +3230,7 @@ var _ = Describe("drift detection and takeover", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, &noLaterThanTimestamp, &noLaterThanTimestamp) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, &noLaterThanTimestamp, &noLaterThanTimestamp) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -3384,7 +3386,7 @@ var _ = Describe("drift detection and takeover", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -3628,7 +3630,7 @@ var _ = Describe("drift detection and takeover", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, &noLaterThanTimestamp, &noLaterThanTimestamp) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, &noLaterThanTimestamp, &noLaterThanTimestamp) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -3762,7 +3764,7 @@ var _ = Describe("drift detection and takeover", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -3888,7 +3890,7 @@ var _ = Describe("drift detection and takeover", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, &noLaterThanTimestamp, &noLaterThanTimestamp) + workStatusUpdatedActual := 
workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, &noLaterThanTimestamp, &noLaterThanTimestamp) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -4002,7 +4004,7 @@ var _ = Describe("drift detection and takeover", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -4128,7 +4130,7 @@ var _ = Describe("drift detection and takeover", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -4259,7 +4261,7 @@ var _ = Describe("drift detection and takeover", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -4386,7 +4388,7 @@ var _ = Describe("drift detection and takeover", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, &noLaterThanTimestamp, &noLaterThanTimestamp) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, &noLaterThanTimestamp, &noLaterThanTimestamp) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -4489,7 +4491,7 @@ var _ = Describe("drift detection and takeover", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -4620,7 +4622,7 @@ var _ = Describe("drift detection and takeover", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -4713,7 +4715,7 @@ var _ = Describe("drift detection and takeover", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, &noLaterThanTimestamp, &noLaterThanTimestamp) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, &noLaterThanTimestamp, &noLaterThanTimestamp) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") // Track the timestamp that was just after the drift was first detected. 
@@ -4787,7 +4789,7 @@ var _ = Describe("drift detection and takeover", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, &driftObservedMustBeforeTimestamp, &firstDriftedMustBeforeTimestamp) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, &driftObservedMustBeforeTimestamp, &firstDriftedMustBeforeTimestamp) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -4948,7 +4950,7 @@ var _ = Describe("drift detection and takeover", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -5073,7 +5075,7 @@ var _ = Describe("report diff", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -5305,7 +5307,7 @@ var _ = Describe("report diff", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, &noLaterThanTimestamp, &noLaterThanTimestamp) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, &noLaterThanTimestamp, &noLaterThanTimestamp) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -5395,7 +5397,7 @@ var _ = Describe("report diff", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, &noLaterThanTimestamp, &noLaterThanTimestamp) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, &noLaterThanTimestamp, &noLaterThanTimestamp) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -5610,7 +5612,7 @@ var _ = Describe("report diff", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -5756,7 +5758,7 @@ var _ = Describe("report diff", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -5990,7 +5992,7 @@ var _ = Describe("handling different apply strategies", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, &noLaterThanTimestamp, &noLaterThanTimestamp) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, &noLaterThanTimestamp, &noLaterThanTimestamp) Eventually(workStatusUpdatedActual, 
eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -6088,7 +6090,7 @@ var _ = Describe("handling different apply strategies", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -6302,7 +6304,7 @@ var _ = Describe("handling different apply strategies", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -6373,7 +6375,7 @@ var _ = Describe("handling different apply strategies", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -6591,7 +6593,7 @@ var _ = Describe("handling different apply strategies", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -6746,7 +6748,7 @@ var _ = Describe("handling different apply strategies", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -6982,7 +6984,7 @@ var _ = Describe("handling different apply strategies", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -7158,7 +7160,7 @@ var _ = Describe("negative cases", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") Consistently(workStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Work status changed unexpectedly") }) @@ -7321,7 +7323,7 @@ var _ = Describe("negative cases", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, 
eventuallyInterval).Should(Succeed(), "Failed to update work status") Consistently(workStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Work status changed unexpectedly") }) @@ -7503,7 +7505,7 @@ var _ = Describe("negative cases", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") Consistently(workStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Work status changed unexpectedly") }) @@ -7734,7 +7736,7 @@ var _ = Describe("negative cases", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") Consistently(workStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Work status changed unexpectedly") }) diff --git a/pkg/controllers/workapplier/process.go b/pkg/controllers/workapplier/process.go index d58c183c2..01ae4d91f 100644 --- a/pkg/controllers/workapplier/process.go +++ b/pkg/controllers/workapplier/process.go @@ -82,7 +82,7 @@ func (r *Reconciler) processManifests( klog.V(2).InfoS("Processed a manifest", "manifestObj", klog.KObj(bundlesInWave[piece].manifestObj), "work", klog.KObj(work)) } - r.parallelizer.ParallelizeUntil(ctx, len(bundlesInWave), doWork, "processingManifests") + r.parallelizer.ParallelizeUntil(ctx, len(bundlesInWave), doWork, fmt.Sprintf("processingManifestsInWave%d", idx)) } } diff --git a/pkg/controllers/workapplier/suite_test.go b/pkg/controllers/workapplier/suite_test.go index c61e0d82a..1ba33cceb 100644 --- a/pkg/controllers/workapplier/suite_test.go +++ b/pkg/controllers/workapplier/suite_test.go @@ -20,6 +20,7 @@ import ( "context" "flag" "path/filepath" + "strings" "sync" "testing" "time" @@ -31,6 +32,7 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" + "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" "k8s.io/klog/v2/textlogger" ctrl "sigs.k8s.io/controller-runtime" @@ -45,6 +47,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/utils/parallelizer" testv1alpha1 "github.com/kubefleet-dev/kubefleet/test/apis/v1alpha1" ) @@ -69,6 +72,13 @@ var ( memberDynamicClient2 dynamic.Interface workApplier2 *Reconciler + memberCfg3 *rest.Config + memberEnv3 *envtest.Environment + hubMgr3 manager.Manager + memberClient3 client.Client + memberDynamicClient3 dynamic.Interface + workApplier3 *Reconciler + ctx context.Context cancel context.CancelFunc wg sync.WaitGroup @@ -82,8 +92,34 @@ const ( memberReservedNSName1 = "fleet-member-experimental-1" memberReservedNSName2 = "fleet-member-experimental-2" + memberReservedNSName3 = "fleet-member-experimental-3" + + parallelizerFixedDelay = 5 * time.Second ) +// parallelizerWithFixedDelay implements the parallelizer.Parallelizer interface that allows running +// tasks in parallel with a fixed delay after completing each task group. 
+//
+// This is added to help verify the behavior of waved parallel processing in the work applier.
+type parallelizerWithFixedDelay struct {
+	regularParallelizer parallelizer.Parallelizer
+	delay               time.Duration
+}
+
+func (p *parallelizerWithFixedDelay) ParallelizeUntil(ctx context.Context, pieces int, doWork workqueue.DoWorkPieceFunc, operation string) {
+	p.regularParallelizer.ParallelizeUntil(ctx, pieces, doWork, operation)
+	klog.V(2).InfoS("Parallelization completed", "operation", operation)
+	// No need to add a delay for non-waved operations.
+	if strings.HasPrefix(operation, "processingManifestsInWave") {
+		// Wait for the fixed delay after processing a wave of manifests.
+		klog.V(2).InfoS("Waiting with a fixed delay after processing a wave", "operation", operation, "delay", p.delay)
+		time.Sleep(p.delay)
+	}
+}
+
+// Verify that parallelizerWithFixedDelay implements the parallelizer.Parallelizer interface.
+var _ parallelizer.Parallelizer = &parallelizerWithFixedDelay{}
+
 func TestAPIs(t *testing.T) {
 	RegisterFailHandler(Fail)
 
@@ -104,6 +140,13 @@ func setupResources() {
 		},
 	}
 	Expect(hubClient.Create(ctx, ns2)).To(Succeed())
+
+	ns3 := &corev1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: memberReservedNSName3,
+		},
+	}
+	Expect(hubClient.Create(ctx, ns3)).To(Succeed())
 }
 
 var _ = BeforeSuite(func() {
@@ -142,6 +185,14 @@ var _ = BeforeSuite(func() {
 			filepath.Join("../../../", "test", "manifests"),
 		},
 	}
+	// memberEnv3 is the test environment for verifying the behavior of waved parallel processing in
+	// the work applier.
+	memberEnv3 = &envtest.Environment{
+		CRDDirectoryPaths: []string{
+			filepath.Join("../../../", "config", "crd", "bases"),
+			filepath.Join("../../../", "test", "manifests"),
+		},
+	}
 
 	var err error
 	hubCfg, err = hubEnv.Start()
@@ -156,6 +207,10 @@ var _ = BeforeSuite(func() {
 	Expect(err).ToNot(HaveOccurred())
 	Expect(memberCfg2).ToNot(BeNil())
 
+	memberCfg3, err = memberEnv3.Start()
+	Expect(err).ToNot(HaveOccurred())
+	Expect(memberCfg3).ToNot(BeNil())
+
 	err = fleetv1beta1.AddToScheme(scheme.Scheme)
 	Expect(err).NotTo(HaveOccurred())
 	err = testv1alpha1.AddToScheme(scheme.Scheme)
@@ -174,6 +229,10 @@ var _ = BeforeSuite(func() {
 	Expect(err).ToNot(HaveOccurred())
 	Expect(memberClient2).ToNot(BeNil())
 
+	memberClient3, err = client.New(memberCfg3, client.Options{Scheme: scheme.Scheme})
+	Expect(err).ToNot(HaveOccurred())
+	Expect(memberClient3).ToNot(BeNil())
+
 	// This setup also requires a client-go dynamic client for the member cluster.
 	memberDynamicClient1, err = dynamic.NewForConfig(memberCfg1)
 	Expect(err).ToNot(HaveOccurred())
 
@@ -181,6 +240,9 @@ var _ = BeforeSuite(func() {
 	memberDynamicClient2, err = dynamic.NewForConfig(memberCfg2)
 	Expect(err).ToNot(HaveOccurred())
 
+	memberDynamicClient3, err = dynamic.NewForConfig(memberCfg3)
+	Expect(err).ToNot(HaveOccurred())
+
 	By("Setting up the resources")
 	setupResources()
 
@@ -207,7 +269,7 @@ var _ = BeforeSuite(func() {
 		memberClient1.RESTMapper(),
 		hubMgr1.GetEventRecorderFor("work-applier"),
 		maxConcurrentReconciles,
-		workerCount,
+		parallelizer.NewParallelizer(workerCount),
 		30*time.Second,
 		true,
 		60,
@@ -256,7 +318,7 @@ var _ = BeforeSuite(func() {
 		memberClient2.RESTMapper(),
 		hubMgr2.GetEventRecorderFor("work-applier"),
 		maxConcurrentReconciles,
-		workerCount,
+		parallelizer.NewParallelizer(workerCount),
 		30*time.Second,
 		true,
 		60,
@@ -271,8 +333,52 @@ var _ = BeforeSuite(func() {
 		Complete(workApplier2)
 	Expect(err).NotTo(HaveOccurred())
 
+	By("Setting up the controller and the controller manager for member cluster 3")
+	hubMgr3, err = ctrl.NewManager(hubCfg, ctrl.Options{
+		Scheme: scheme.Scheme,
+		Metrics: server.Options{
+			BindAddress: "0",
+		},
+		Cache: cache.Options{
+			DefaultNamespaces: map[string]cache.Config{
+				memberReservedNSName3: {},
+			},
+		},
+		Logger: textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(4))),
+	})
+	Expect(err).ToNot(HaveOccurred())
+
+	pWithDelay := &parallelizerWithFixedDelay{
+		regularParallelizer: parallelizer.NewParallelizer(parallelizer.DefaultNumOfWorkers),
+		// To avoid flakiness, use a fixed delay of 5 seconds so that we could reliably verify
+		// if manifests are actually being processed in waves.
+		delay: parallelizerFixedDelay,
+	}
+	workApplier3 = NewReconciler(
+		hubClient,
+		memberReservedNSName3,
+		memberDynamicClient3,
+		memberClient3,
+		memberClient3.RESTMapper(),
+		hubMgr3.GetEventRecorderFor("work-applier"),
+		maxConcurrentReconciles,
+		pWithDelay,
+		30*time.Second,
+		true,
+		60,
+		nil, // Use the default backoff rate limiter.
+	)
+	// Due to name conflicts, the third work applier must be set up manually.
+	err = ctrl.NewControllerManagedBy(hubMgr3).Named("work-applier-controller-waved-parallel-processing").
+		WithOptions(ctrloption.Options{
+			MaxConcurrentReconciles: workApplier3.concurrentReconciles,
+		}).
+		For(&fleetv1beta1.Work{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})).
+		Complete(workApplier3)
+	Expect(err).NotTo(HaveOccurred())
+
 	wg = sync.WaitGroup{}
-	wg.Add(2)
+	wg.Add(3)
 	go func() {
 		defer GinkgoRecover()
 		defer wg.Done()
@@ -286,6 +392,13 @@ var _ = BeforeSuite(func() {
 		Expect(workApplier2.Join(ctx)).To(Succeed())
 		Expect(hubMgr2.Start(ctx)).To(Succeed())
 	}()
+
+	go func() {
+		defer GinkgoRecover()
+		defer wg.Done()
+		Expect(workApplier3.Join(ctx)).To(Succeed())
+		Expect(hubMgr3.Start(ctx)).To(Succeed())
+	}()
 })
 
 var _ = AfterSuite(func() {
@@ -297,4 +410,5 @@ var _ = AfterSuite(func() {
 	Expect(hubEnv.Stop()).To(Succeed())
 	Expect(memberEnv1.Stop()).To(Succeed())
 	Expect(memberEnv2.Stop()).To(Succeed())
+	Expect(memberEnv3.Stop()).To(Succeed())
 })
diff --git a/pkg/controllers/workapplier/waves_integration_test.go b/pkg/controllers/workapplier/waves_integration_test.go
new file mode 100644
index 000000000..82bc01bcb
--- /dev/null
+++ b/pkg/controllers/workapplier/waves_integration_test.go
@@ -0,0 +1,1218 @@
+/*
+Copyright 2025 The KubeFleet Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workapplier + +import ( + "fmt" + "math/rand/v2" + "slices" + "time" + + "github.com/google/go-cmp/cmp" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + appsv1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + networkingv1 "k8s.io/api/networking/v1" + policyv1 "k8s.io/api/policy/v1" + rbacv1 "k8s.io/api/rbac/v1" + schedulingv1 "k8s.io/api/scheduling/v1" + storagev1 "k8s.io/api/storage/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/utils" + "github.com/kubefleet-dev/kubefleet/pkg/utils/condition" +) + +// Note (chenyu1): all test cases in this file use a separate test environment +// (same hub cluster, different fleet member reserved namespace, different +// work applier instance) from the other integration tests. This is needed +// to (relatively speaking) reliably verify the wave-based parallel processing +// in the work applier. + +var _ = Describe("parallel processing with waves", func() { + Context("single wave", Ordered, func() { + workName := fmt.Sprintf(workNameTemplate, utils.RandStr()) + // The environment prepared by the envtest package does not support namespace + // deletion; each test case would use a new namespace. + nsName := fmt.Sprintf(nsNameTemplate, utils.RandStr()) + + pcName := "priority-class-1" + + BeforeAll(func() { + // Prepare a NS object. + regularNS := ns.DeepCopy() + regularNS.Name = nsName + regularNSJSON := marshalK8sObjJSON(regularNS) + + // Prepare a PriorityClass object. + regularPC := &schedulingv1.PriorityClass{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "scheduling.k8s.io/v1", + Kind: "PriorityClass", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: pcName, + }, + Value: 1000, + GlobalDefault: false, + Description: "Experimental priority class", + } + regularPCJSON := marshalK8sObjJSON(regularPC) + + // Create a new Work object with all the manifest JSONs. + createWorkObject(workName, memberReservedNSName3, nil, regularNSJSON, regularPCJSON) + }) + + // For simplicity reasons, this test case will skip some of the regular apply op result verification + // (finalizer check, AppliedWork object check, etc.), as they have been repeatedly verified in different + // test cases under similar conditions. + + It("should update the Work object status", func() { + // Prepare the status information. 
+ workConds := []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: condition.WorkAllManifestsAppliedReason, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: condition.WorkAllManifestsAvailableReason, + }, + } + manifestConds := []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "Namespace", + Resource: "namespaces", + Name: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeApplied), + ObservedGeneration: 0, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(AvailabilityResultTypeAvailable), + ObservedGeneration: 0, + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Group: "scheduling.k8s.io", + Version: "v1", + Kind: "PriorityClass", + Resource: "priorityclasses", + Name: pcName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeApplied), + ObservedGeneration: 1, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(AvailabilityResultTypeAvailable), + ObservedGeneration: 1, + }, + }, + }, + } + + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName3, workName, workConds, manifestConds, nil, nil) + // Considering the presence of fixed delay in the parallelizer, the test case here + // uses a longer timeout and interval. + Eventually(workStatusUpdatedActual, eventuallyDuration*2, eventuallyInterval*2).Should(Succeed(), "Failed to update work status") + }) + + It("should create resources in parallel", func() { + // The work applier in use for this environment is set to wait for a fixed delay between each + // parallelizer call. If the parallelization is set up correctly, resources in the same wave + // should have very close creation timestamps, while the creation timestamps between resources + // in different waves should have a consistent gap (roughly the fixed delay). + + placedNS := &corev1.Namespace{} + Eventually(memberClient3.Get(ctx, types.NamespacedName{Name: nsName}, placedNS), eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to get the placed Namespace") + + placedPC := &schedulingv1.PriorityClass{} + Eventually(memberClient3.Get(ctx, types.NamespacedName{Name: pcName}, placedPC), eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to get the placed PriorityClass") + + gap := placedPC.CreationTimestamp.Sub(placedNS.CreationTimestamp.Time) + // The two objects belong to the same wave; the creation timestamps should be very close. + Expect(gap).To(BeNumerically("<=", time.Second), "The creation time gap between resources in the same wave is greater than or equal to the fixed delay") + }) + + AfterAll(func() { + // Delete the Work object and related resources. + deleteWorkObject(workName, memberReservedNSName3) + + // Remove the PriorityClass object if it still exists. 
+ Eventually(func() error { + pc := &schedulingv1.PriorityClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: pcName, + }, + } + return memberClient3.Delete(ctx, pc) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the PriorityClass object") + + // Ensure that the AppliedWork object has been removed. + appliedWorkRemovedActual := appliedWorkRemovedActual(workName, nsName) + Eventually(appliedWorkRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the AppliedWork object") + + workRemovedActual := workRemovedActual(workName) + Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") + // The environment prepared by the envtest package does not support namespace + // deletion; consequently this test suite would not attempt to verify its deletion. + }) + }) + + Context("two consecutive waves", Ordered, func() { + workName := fmt.Sprintf(workNameTemplate, utils.RandStr()) + // The environment prepared by the envtest package does not support namespace + // deletion; each test case would use a new namespace. + nsName := fmt.Sprintf(nsNameTemplate, utils.RandStr()) + + BeforeAll(func() { + // Prepare a NS object. + regularNS := ns.DeepCopy() + regularNS.Name = nsName + regularNSJSON := marshalK8sObjJSON(regularNS) + + // Prepare a ConfigMap object. + regularCM := configMap.DeepCopy() + regularCM.Namespace = nsName + regularCMJSON := marshalK8sObjJSON(regularCM) + + // Create a new Work object with all the manifest JSONs. + createWorkObject(workName, memberReservedNSName3, nil, regularNSJSON, regularCMJSON) + }) + + // For simplicity reasons, this test case will skip some of the regular apply op result verification + // (finalizer check, AppliedWork object check, etc.), as they have been repeatedly verified in different + // test cases under similar conditions. + + It("should update the Work object status", func() { + // Prepare the status information. 
+ workConds := []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: condition.WorkAllManifestsAppliedReason, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: condition.WorkAllManifestsAvailableReason, + }, + } + manifestConds := []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "Namespace", + Resource: "namespaces", + Name: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeApplied), + ObservedGeneration: 0, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(AvailabilityResultTypeAvailable), + ObservedGeneration: 0, + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Group: "", + Version: "v1", + Kind: "ConfigMap", + Resource: "configmaps", + Name: configMapName, + Namespace: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeApplied), + ObservedGeneration: 0, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(AvailabilityResultTypeAvailable), + ObservedGeneration: 0, + }, + }, + }, + } + + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName3, workName, workConds, manifestConds, nil, nil) + // Considering the presence of fixed delay in the parallelizer, the test case here + // uses a longer timeout and interval. + Eventually(workStatusUpdatedActual, eventuallyDuration*2, eventuallyInterval*2).Should(Succeed(), "Failed to update work status") + }) + + It("should create resources in waves", func() { + // The work applier in use for this environment is set to wait for a fixed delay between each + // parallelizer call. If the parallelization is set up correctly, resources in the same wave + // should have very close creation timestamps, while the creation timestamps between resources + // in different waves should have a consistent gap (roughly the fixed delay). + + placedNS := &corev1.Namespace{} + Eventually(memberClient3.Get(ctx, types.NamespacedName{Name: nsName}, placedNS), eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to get the placed Namespace") + + placedCM := &corev1.ConfigMap{} + Eventually(memberClient3.Get(ctx, types.NamespacedName{Namespace: nsName, Name: configMapName}, placedCM), eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to get the placed ConfigMap") + + gap := placedCM.CreationTimestamp.Sub(placedNS.CreationTimestamp.Time) + Expect(gap).To(BeNumerically(">=", parallelizerFixedDelay), "The creation time gap between resources in different waves is less than the fixed delay") + Expect(gap).To(BeNumerically("<", parallelizerFixedDelay*2), "The creation time gap between resources in different waves is at least twice as large as the fixed delay") + }) + + AfterAll(func() { + // Delete the Work object and related resources. + deleteWorkObject(workName, memberReservedNSName3) + + // Remove the ConfigMap object if it still exists. 
+ cmRemovedActual := regularConfigMapRemovedActual(nsName, configMapName) + Eventually(cmRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the ConfigMap object") + + // Ensure that the AppliedWork object has been removed. + appliedWorkRemovedActual := appliedWorkRemovedActual(workName, nsName) + Eventually(appliedWorkRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the AppliedWork object") + + workRemovedActual := workRemovedActual(workName) + Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") + // The environment prepared by the envtest package does not support namespace + // deletion; consequently this test suite would not attempt to verify its deletion. + }) + }) + + Context("two non-consecutive waves", Ordered, func() { + workName := fmt.Sprintf(workNameTemplate, utils.RandStr()) + // The environment prepared by the envtest package does not support namespace + // deletion; each test case would use a new namespace. + nsName := fmt.Sprintf(nsNameTemplate, utils.RandStr()) + + roleName := "role-1" + + BeforeAll(func() { + // Prepare a NS object. + regularNS := ns.DeepCopy() + regularNS.Name = nsName + regularNSJSON := marshalK8sObjJSON(regularNS) + + // Prepare a Role object. + regularRole := &rbacv1.Role{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "Role", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + Namespace: nsName, + }, + Rules: []rbacv1.PolicyRule{}, + } + regularRoleJSON := marshalK8sObjJSON(regularRole) + + // Create a new Work object with all the manifest JSONs. + createWorkObject(workName, memberReservedNSName3, nil, regularNSJSON, regularRoleJSON) + }) + + // For simplicity reasons, this test case will skip some of the regular apply op result verification + // (finalizer check, AppliedWork object check, etc.), as they have been repeatedly verified in different + // test cases under similar conditions. + + It("should update the Work object status", func() { + // Prepare the status information. 
+ workConds := []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: condition.WorkAllManifestsAppliedReason, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: condition.WorkAllManifestsAvailableReason, + }, + } + manifestConds := []fleetv1beta1.ManifestCondition{ + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 0, + Group: "", + Version: "v1", + Kind: "Namespace", + Resource: "namespaces", + Name: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeApplied), + ObservedGeneration: 0, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(AvailabilityResultTypeAvailable), + ObservedGeneration: 0, + }, + }, + }, + { + Identifier: fleetv1beta1.WorkResourceIdentifier{ + Ordinal: 1, + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "Role", + Resource: "roles", + Name: roleName, + Namespace: nsName, + }, + Conditions: []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: string(ApplyOrReportDiffResTypeApplied), + ObservedGeneration: 0, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: string(AvailabilityResultTypeAvailable), + ObservedGeneration: 0, + }, + }, + }, + } + + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName3, workName, workConds, manifestConds, nil, nil) + // Considering the presence of fixed delay in the parallelizer, the test case here + // uses a longer timeout and interval. + Eventually(workStatusUpdatedActual, eventuallyDuration*2, eventuallyInterval*2).Should(Succeed(), "Failed to update work status") + }) + + It("should create resources in waves", func() { + // The work applier in use for this environment is set to wait for a fixed delay between each + // parallelizer call. If the parallelization is set up correctly, resources in the same wave + // should have very close creation timestamps, while the creation timestamps between resources + // in different waves should have a consistent gap (roughly the fixed delay). + + placedNS := &corev1.Namespace{} + Eventually(memberClient3.Get(ctx, types.NamespacedName{Name: nsName}, placedNS), eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to get the placed Namespace") + + placedRole := &rbacv1.Role{} + Eventually(memberClient3.Get(ctx, types.NamespacedName{Namespace: nsName, Name: roleName}, placedRole), eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to get the placed Role") + + gap := placedRole.CreationTimestamp.Sub(placedNS.CreationTimestamp.Time) + Expect(gap).To(BeNumerically(">=", parallelizerFixedDelay), "The creation time gap between resources in different waves is less than the fixed delay") + Expect(gap).To(BeNumerically("<", parallelizerFixedDelay*2), "The creation time gap between resources in different waves is at least twice as large as the fixed delay") + }) + + AfterAll(func() { + // Delete the Work object and related resources. + deleteWorkObject(workName, memberReservedNSName3) + + // Remove the Role object if it still exists. 
+			Eventually(func() error {
+				cr := &rbacv1.Role{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      roleName,
+						Namespace: nsName,
+					},
+				}
+				if err := memberClient3.Delete(ctx, cr); err != nil && !errors.IsNotFound(err) {
+					return fmt.Errorf("failed to delete the role object: %w", err)
+				}
+
+				// Check that the Role object has been deleted.
+				if err := memberClient3.Get(ctx, types.NamespacedName{Namespace: nsName, Name: roleName}, cr); !errors.IsNotFound(err) {
+					return fmt.Errorf("role object still exists or an unexpected error occurred: %w", err)
+				}
+				return nil
+			}, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Role object")
+
+			// Ensure that the AppliedWork object has been removed.
+			appliedWorkRemovedActual := appliedWorkRemovedActual(workName, nsName)
+			Eventually(appliedWorkRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the AppliedWork object")
+
+			workRemovedActual := workRemovedActual(workName)
+			Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object")
+			// The environment prepared by the envtest package does not support namespace
+			// deletion; consequently this test suite would not attempt to verify its deletion.
+		})
+	})
+
+	Context("all waves", Ordered, func() {
+		workName := fmt.Sprintf(workNameTemplate, utils.RandStr())
+		// The environment prepared by the envtest package does not support namespace
+		// deletion; each test case would use a new namespace.
+		nsName := fmt.Sprintf(nsNameTemplate, utils.RandStr())
+
+		// The array below includes objects of all known resource types for waved
+		// processing, plus a few objects of unknown resource types.
+		objectsOfVariousResourceTypes := []client.Object{
+			// Wave 0 objects.
+			// Namespace object is created separately.
+			&schedulingv1.PriorityClass{
+				TypeMeta: metav1.TypeMeta{
+					APIVersion: "scheduling.k8s.io/v1",
+					Kind:       "PriorityClass",
+				},
+				ObjectMeta: metav1.ObjectMeta{
+					Name: fmt.Sprintf("pc-%s", utils.RandStr()),
+				},
+				Value: 1000,
+			},
+			// Wave 1 objects.
+ &networkingv1.NetworkPolicy{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "networking.k8s.io/v1", + Kind: "NetworkPolicy", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("np-%s", utils.RandStr()), + Namespace: nsName, + }, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{}, + }, + }, + &corev1.ResourceQuota{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ResourceQuota", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("rq-%s", utils.RandStr()), + Namespace: nsName, + }, + Spec: corev1.ResourceQuotaSpec{}, + }, + &corev1.LimitRange{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "LimitRange", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("lr-%s", utils.RandStr()), + Namespace: nsName, + }, + Spec: corev1.LimitRangeSpec{}, + }, + &policyv1.PodDisruptionBudget{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "policy/v1", + Kind: "PodDisruptionBudget", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("pdb-%s", utils.RandStr()), + Namespace: nsName, + }, + Spec: policyv1.PodDisruptionBudgetSpec{ + Selector: &metav1.LabelSelector{}, + }, + }, + &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("sa-%s", utils.RandStr()), + Namespace: nsName, + }, + }, + &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("secret-%s", utils.RandStr()), + Namespace: nsName, + }, + Type: corev1.SecretTypeOpaque, + }, + &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("cm-%s", utils.RandStr()), + Namespace: nsName, + }, + }, + &storagev1.StorageClass{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "storage.k8s.io/v1", + Kind: "StorageClass", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("sc-%s", utils.RandStr()), + }, + Provisioner: "kubernetes.io/no-provisioner", + }, + &corev1.PersistentVolume{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "PersistentVolume", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("pv-%s", utils.RandStr()), + }, + Spec: corev1.PersistentVolumeSpec{ + Capacity: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + PersistentVolumeSource: corev1.PersistentVolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/mnt/data", + }, + }, + }, + }, + &corev1.PersistentVolumeClaim{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "PersistentVolumeClaim", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("pvc-%s", utils.RandStr()), + Namespace: nsName, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + &apiextensionsv1.CustomResourceDefinition{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apiextensions.k8s.io/v1", + Kind: "CustomResourceDefinition", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "bars.example.com", + }, + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "example.com", + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Plural: "bars", + Kind: "Bar", + Singular: "bar", + }, + Scope: 
apiextensionsv1.NamespaceScoped, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{{ + Name: "v1", + Served: true, + Storage: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]apiextensionsv1.JSONSchemaProps{ + "spec": { + Type: "object", + Properties: map[string]apiextensionsv1.JSONSchemaProps{ + "placeholder": { + Type: "string", + }, + }, + }, + }, + }, + }, + }}, + }, + }, + &networkingv1.IngressClass{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "networking.k8s.io/v1", + Kind: "IngressClass", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("ic-%s", utils.RandStr()), + }, + Spec: networkingv1.IngressClassSpec{ + Controller: "example.com/ingress-controller", + }, + }, + // Wave 2 objects. + &rbacv1.ClusterRole{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "ClusterRole", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("cr-%s", utils.RandStr()), + }, + Rules: []rbacv1.PolicyRule{}, + }, + &rbacv1.Role{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "Role", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("role-%s", utils.RandStr()), + Namespace: nsName, + }, + Rules: []rbacv1.PolicyRule{}, + }, + // Wave 3 objects. + &rbacv1.ClusterRoleBinding{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "ClusterRoleBinding", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("crb-%s", utils.RandStr()), + }, + Subjects: []rbacv1.Subject{}, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "dummy", + }, + }, + &rbacv1.RoleBinding{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "RoleBinding", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("rb-%s", utils.RandStr()), + Namespace: nsName, + }, + Subjects: []rbacv1.Subject{}, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "Role", + Name: "dummy", + }, + }, + // Wave 4 objects. 
+ &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("svc-%s", utils.RandStr()), + Namespace: nsName, + }, + Spec: corev1.ServiceSpec{ + Selector: map[string]string{}, + Ports: []corev1.ServicePort{ + { + Name: "http", + Port: 80, + }, + }, + }, + }, + &appsv1.DaemonSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "DaemonSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("ds-%s", utils.RandStr()), + Namespace: nsName, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + dummyLabelKey: dummyLabelValue1, + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + dummyLabelKey: dummyLabelValue1, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + }, + }, + }, + }, + }, + }, + &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("pod-%s", utils.RandStr()), + Namespace: nsName, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + }, + }, + }, + }, + &corev1.ReplicationController{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ReplicationController", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("rc-%s", utils.RandStr()), + Namespace: nsName, + }, + Spec: corev1.ReplicationControllerSpec{ + Selector: map[string]string{ + dummyLabelKey: dummyLabelValue2, + }, + Template: &corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + dummyLabelKey: dummyLabelValue2, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + }, + }, + }, + }, + }, + }, + &appsv1.ReplicaSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "ReplicaSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("rs-%s", utils.RandStr()), + Namespace: nsName, + }, + Spec: appsv1.ReplicaSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + dummyLabelKey: dummyLabelValue3, + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + dummyLabelKey: dummyLabelValue3, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + }, + }, + }, + }, + }, + }, + &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("deploy-%s", utils.RandStr()), + Namespace: nsName, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + dummyLabelKey: dummyLabelValue4, + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + dummyLabelKey: dummyLabelValue4, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + }, + }, + }, + }, + }, + }, + &autoscalingv1.HorizontalPodAutoscaler{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "autoscaling/v1", + Kind: "HorizontalPodAutoscaler", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("hpa-%s", utils.RandStr()), + Namespace: nsName, + }, + Spec: autoscalingv1.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "dummy", 
+ }, + MaxReplicas: 10, + MinReplicas: ptr.To(int32(1)), + }, + }, + &appsv1.StatefulSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "StatefulSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("sts-%s", utils.RandStr()), + Namespace: nsName, + }, + Spec: appsv1.StatefulSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + dummyLabelKey: dummyLabelValue5, + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + dummyLabelKey: dummyLabelValue5, + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + }, + }, + }, + }, + }, + }, + &batchv1.Job{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "batch/v1", + Kind: "Job", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("job-%s", utils.RandStr()), + Namespace: nsName, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + }, + }, + RestartPolicy: corev1.RestartPolicyNever, + }, + }, + }, + }, + &batchv1.CronJob{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "batch/v1", + Kind: "CronJob", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("cj-%s", utils.RandStr()), + Namespace: nsName, + }, + Spec: batchv1.CronJobSpec{ + JobTemplate: batchv1.JobTemplateSpec{ + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox", + }, + }, + RestartPolicy: corev1.RestartPolicyNever, + }, + }, + }, + }, + Schedule: "*/1 * * * *", + }, + }, + &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "networking.k8s.io/v1", + Kind: "Ingress", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("ing-%s", utils.RandStr()), + Namespace: nsName, + }, + Spec: networkingv1.IngressSpec{ + Rules: []networkingv1.IngressRule{ + { + Host: "example.com", + IngressRuleValue: networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + { + Path: "/", + PathType: ptr.To(networkingv1.PathTypePrefix), + Backend: networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "placeholder", + Port: networkingv1.ServiceBackendPort{ + Number: 80, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + // Wave 5 objects. + // The APIService object is not included due to setup complications. + &admissionregistrationv1.ValidatingWebhookConfiguration{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "admissionregistration.k8s.io/v1", + Kind: "ValidatingWebhookConfiguration", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("vwc-%s", utils.RandStr()), + }, + Webhooks: []admissionregistrationv1.ValidatingWebhook{}, + }, + &admissionregistrationv1.MutatingWebhookConfiguration{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "admissionregistration.k8s.io/v1", + Kind: "MutatingWebhookConfiguration", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("mwc-%s", utils.RandStr()), + }, + Webhooks: []admissionregistrationv1.MutatingWebhook{}, + }, + // Unknown resource types (no wave assigned by default); should always get processed at last. 
+ &discoveryv1.EndpointSlice{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "discovery.k8s.io/v1", + Kind: "EndpointSlice", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("eps-%s", utils.RandStr()), + Namespace: nsName, + }, + AddressType: discoveryv1.AddressTypeIPv4, + Endpoints: []discoveryv1.Endpoint{}, + }, + } + + BeforeAll(func() { + allManifestJSONByteArrs := make([][]byte, 0, len(objectsOfVariousResourceTypes)+1) + + // Prepare a NS object. + regularNS := ns.DeepCopy() + regularNS.Name = nsName + regularNSJSON := marshalK8sObjJSON(regularNS) + allManifestJSONByteArrs = append(allManifestJSONByteArrs, regularNSJSON) + + // Prepare all other objects. + for idx := range objectsOfVariousResourceTypes { + obj := objectsOfVariousResourceTypes[idx] + allManifestJSONByteArrs = append(allManifestJSONByteArrs, marshalK8sObjJSON(obj)) + } + // Shuffle the manifest JSONs. + rand.Shuffle(len(allManifestJSONByteArrs), func(i, j int) { + allManifestJSONByteArrs[i], allManifestJSONByteArrs[j] = allManifestJSONByteArrs[j], allManifestJSONByteArrs[i] + }) + + // Create a new Work object with all the manifest JSONs. + createWorkObject(workName, memberReservedNSName3, nil, allManifestJSONByteArrs...) + }) + + // For simplicity reasons, this test case will skip some of the regular apply op result verification + // (finalizer check, AppliedWork object check, etc.), as they have been repeatedly verified in different + // test cases under similar conditions. + + It("should update the Work object status", func() { + Eventually(func() error { + work := &fleetv1beta1.Work{} + if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName3}, work); err != nil { + return fmt.Errorf("failed to retrieve the Work object: %w", err) + } + + // Compare only the work conditions for simplicity reasons. + wantWorkConds := []metav1.Condition{ + { + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionTrue, + Reason: condition.WorkAllManifestsAppliedReason, + }, + { + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionFalse, + // In the current test environment some API objects will never become available. 
+ Reason: condition.WorkNotAllManifestsAvailableReason, + }, + } + for idx := range wantWorkConds { + wantWorkConds[idx].ObservedGeneration = work.Generation + } + if diff := cmp.Diff( + work.Status.Conditions, wantWorkConds, + ignoreFieldConditionLTTMsg, + ); diff != "" { + return fmt.Errorf("Work status conditions mismatch (-got, +want):\n%s", diff) + } + return nil + }, eventuallyDuration*10, eventuallyInterval*5).Should(Succeed(), "Failed to update work status") + }) + + It("should process manifests in waves", func() { + creationTimestampsPerWave := make(map[waveNumber][]metav1.Time, len(defaultWaveNumberByResourceType)) + for idx := range objectsOfVariousResourceTypes { + obj := objectsOfVariousResourceTypes[idx] + objGK := obj.GetObjectKind().GroupVersionKind().GroupKind() + objVer := obj.GetObjectKind().GroupVersionKind().Version + objResTyp, err := memberClient3.RESTMapper().RESTMapping(objGK, objVer) + Expect(err).NotTo(HaveOccurred(), "Failed to get the resource type of an object") + + processedObj := obj.DeepCopyObject().(client.Object) + Expect(memberClient3.Get(ctx, client.ObjectKey{Namespace: obj.GetNamespace(), Name: obj.GetName()}, processedObj)).To(Succeed(), "Failed to get a placed object") + + waveNum, ok := defaultWaveNumberByResourceType[objResTyp.Resource.Resource] + if !ok { + waveNum = lastWave + } + + timestamps := creationTimestampsPerWave[waveNum] + if timestamps == nil { + timestamps = make([]metav1.Time, 0, 1) + } + timestamps = append(timestamps, processedObj.GetCreationTimestamp()) + creationTimestampsPerWave[waveNum] = timestamps + } + + expectedWaveNums := []waveNumber{0, 1, 2, 3, 4, 5, lastWave} + // Do a sanity check. + Expect(len(creationTimestampsPerWave)).To(Equal(len(expectedWaveNums)), "The number of waves does not match the expectation") + var observedLatestCreationTimestampInLastWave *metav1.Time + for _, waveNum := range expectedWaveNums { + By(fmt.Sprintf("checking wave %d", waveNum)) + + timestamps := creationTimestampsPerWave[waveNum] + // Do a sanity check. + Expect(timestamps).NotTo(BeEmpty(), "No creation timestamps recorded for a wave") + + // Check that timestamps in the same wave are close enough. + slices.SortFunc(timestamps, func(a, b metav1.Time) int { + return a.Time.Compare(b.Time) + }) + + earliest := timestamps[0] + latest := timestamps[len(timestamps)-1] + gapWithinWave := latest.Sub(earliest.Time) + // Normally all resources in the same wave should be created within a very short time window, + // usually within a few tens of milliseconds; here the test spec uses a more forgiving threshold + // of 2 seconds to avoid flakiness. + Expect(gapWithinWave).To(BeNumerically("<", time.Second*2), "The creation time gap between resources in the same wave is larger than expected") + + if observedLatestCreationTimestampInLastWave != nil { + // Check that the current wave is processed after the last wave with a fixed delay. 
+					gapBetweenWaves := earliest.Sub(observedLatestCreationTimestampInLastWave.Time)
+					Expect(gapBetweenWaves).To(BeNumerically(">=", parallelizerFixedDelay), "The creation time gap between resources in different waves is less than the fixed delay")
+					Expect(gapBetweenWaves).To(BeNumerically("<", parallelizerFixedDelay*2), "The creation time gap between resources in different waves is at least twice as large as the fixed delay")
+				}
+
+				observedLatestCreationTimestampInLastWave = &timestamps[len(timestamps)-1]
+			}
+		})
+	})
+})
diff --git a/pkg/scheduler/framework/framework.go b/pkg/scheduler/framework/framework.go
index 847269fd4..c66270967 100644
--- a/pkg/scheduler/framework/framework.go
+++ b/pkg/scheduler/framework/framework.go
@@ -117,7 +117,7 @@ type framework struct {
 	eventRecorder record.EventRecorder
 
 	// parallelizer is a utility which helps run tasks in parallel.
-	parallelizer *parallelizer.Parallerlizer
+	parallelizer parallelizer.Parallelizer
 
 	// eligibilityChecker is a utility which helps determine if a cluster is eligible for resource placement.
 	clusterEligibilityChecker *clustereligibilitychecker.ClusterEligibilityChecker
diff --git a/pkg/utils/parallelizer/parallelizer.go b/pkg/utils/parallelizer/parallelizer.go
index f6fd331d0..4c26fb43d 100644
--- a/pkg/utils/parallelizer/parallelizer.go
+++ b/pkg/utils/parallelizer/parallelizer.go
@@ -30,19 +30,25 @@ const (
 )
 
 // Parallelizer helps run tasks in parallel.
-type Parallerlizer struct {
+type Parallelizer interface {
+	// ParallelizeUntil runs tasks in parallel, wrapping workqueue.ParallelizeUntil.
+	ParallelizeUntil(ctx context.Context, pieces int, doWork workqueue.DoWorkPieceFunc, operation string)
+}
+
+// parallelizer is the default implementation of the Parallelizer interface.
+type parallelizer struct {
 	numOfWorkers int
 }
 
-// NewParallelizer returns a Parallelizer for running tasks in parallel.
-func NewParallelizer(workers int) *Parallerlizer {
-	return &Parallerlizer{
+// NewParallelizer returns a parallelizer for running tasks in parallel.
+func NewParallelizer(workers int) *parallelizer {
+	return &parallelizer{
 		numOfWorkers: workers,
 	}
 }
 
 // ParallelizeUntil wraps workqueue.ParallelizeUntil for running tasks in parallel.
-func (p *Parallerlizer) ParallelizeUntil(ctx context.Context, pieces int, doWork workqueue.DoWorkPieceFunc, operation string) {
+func (p *parallelizer) ParallelizeUntil(ctx context.Context, pieces int, doWork workqueue.DoWorkPieceFunc, operation string) {
 	doWorkWithLogs := func(piece int) {
 		klog.V(4).Infof("run piece %d for operation %s", piece, operation)
 		doWork(piece)

From 3c6ee73b6349adfb6bdfe85fa8bbd91d49fbeb43 Mon Sep 17 00:00:00 2001
From: michaelawyu
Date: Tue, 30 Sep 2025 17:41:02 +1000
Subject: [PATCH 3/3] Minor conflict fixes

Signed-off-by: michaelawyu
---
 pkg/controllers/workapplier/controller_integration_test.go | 6 +++---
 pkg/controllers/workapplier/suite_test.go                  | 4 +++-
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/pkg/controllers/workapplier/controller_integration_test.go b/pkg/controllers/workapplier/controller_integration_test.go
index 9dced9293..229571ed8 100644
--- a/pkg/controllers/workapplier/controller_integration_test.go
+++ b/pkg/controllers/workapplier/controller_integration_test.go
@@ -447,7 +447,7 @@ func workStatusUpdated(
 	return func() error {
 		// Retrieve the Work object.
work := &fleetv1beta1.Work{} - if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName1}, work); err != nil { + if err := hubClient.Get(ctx, client.ObjectKey{Name: workName, Namespace: memberReservedNSName}, work); err != nil { return fmt.Errorf("failed to retrieve the Work object: %w", err) } @@ -3866,7 +3866,7 @@ var _ = Describe("drift detection and takeover", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") }) @@ -8057,7 +8057,7 @@ var _ = Describe("negative cases", func() { }, } - workStatusUpdatedActual := workStatusUpdated(workName, memberReservedNSName1, workConds, manifestConds, nil, nil) + workStatusUpdatedActual := workStatusUpdated(memberReservedNSName1, workName, workConds, manifestConds, nil, nil) Eventually(workStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update work status") Consistently(workStatusUpdatedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Work status changed unexpectedly") }) diff --git a/pkg/controllers/workapplier/suite_test.go b/pkg/controllers/workapplier/suite_test.go index adc281807..ecab1d12f 100644 --- a/pkg/controllers/workapplier/suite_test.go +++ b/pkg/controllers/workapplier/suite_test.go @@ -94,6 +94,8 @@ const ( memberReservedNSName1 = "fleet-member-experimental-1" memberReservedNSName2 = "fleet-member-experimental-2" memberReservedNSName3 = "fleet-member-experimental-3" + + parallelizerFixedDelay = time.Second * 5 ) // tasks in parallel with a fixed delay after completing each task group. @@ -356,7 +358,7 @@ var _ = BeforeSuite(func() { regularParallelizer: parallelizer.NewParallelizer(parallelizer.DefaultNumOfWorkers), // To avoid flakiness, use a fixed delay of 5 seconds so that we could reliably verify // if manifests are actually being processed in waves. - delay: time.Second * 5, + delay: parallelizerFixedDelay, } workApplier3 = NewReconciler( hubClient,