Commit 3b35a2c

CLOUDP-359062 - add ownerrefs and only deleteAll in multicluster (#620)
# Summary

fixes: #589

This pull request introduces important improvements to how Kubernetes resources are managed and cleaned up for MongoDB and Ops Manager deployments, especially in multi-cluster versus single-cluster scenarios. The main focus is on correctly setting owner references for resources to enable automatic garbage collection by Kubernetes, and ensuring explicit resource deletion only occurs when necessary (i.e., in multi-cluster setups where owner references cannot span clusters).

**Owner Reference Management & Resource Cleanup**

* Added missing owner references (`ownerReferences`) to all relevant ConfigMaps and resources for MongoDB and Ops Manager, ensuring proper automatic cleanup by Kubernetes in single-cluster deployments.
* Refactored constructors and initialization logic across controllers (`appdbreplicaset_controller.go`, `mongodbopsmanager_controller.go`, `mongodbshardedcluster_controller.go`, `state_store.go`) to consistently pass and set `ownerReferences`.

**Multi-Cluster vs Single-Cluster Resource Deletion Logic**

* Updated deletion logic in controllers so that explicit resource deletion is only performed in multi-cluster deployments; single-cluster setups now rely solely on owner references for cleanup.

## Proof of Work

- unit tests
- relying on kubernetes GC working properly

## Checklist

- [x] Have you linked a jira ticket and/or is the ticket in the title?
- [x] Have you checked whether your jira ticket required DOCSP changes?
- [x] Have you added changelog file?
    - use `skip-changelog` label if not needed
    - refer to [Changelog files and Release Notes](https://github.com/mongodb/mongodb-kubernetes/blob/master/CONTRIBUTING.md#changelog-files-and-release-notes) section in CONTRIBUTING.md for more details
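To recap the mechanism the summary relies on: Kubernetes garbage-collects a dependent object once all owners listed in its `metadata.ownerReferences` have been deleted, but only within a single cluster. Below is a minimal, illustrative sketch of the kind of owner reference the operator now attaches to dependent ConfigMaps and Services. Field values mirror the test expectations in this commit; the UID is a placeholder, and this literal is not the repository's `kube.BaseOwnerReference` implementation.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

func main() {
	// Illustration only: an owner reference pointing a dependent object back at its
	// MongoDBOpsManager CR. With this set, deleting the CR lets the Kubernetes garbage
	// collector remove the dependent automatically in single-cluster deployments.
	ownerRef := metav1.OwnerReference{
		APIVersion:         "mongodb.com/v1",
		Kind:               "MongoDBOpsManager",
		Name:               "test-om",
		UID:                "00000000-0000-0000-0000-000000000000", // placeholder: the owner's UID
		Controller:         ptr.To(true),
		BlockOwnerDeletion: ptr.To(true),
	}
	fmt.Printf("dependent objects would carry: %+v\n", ownerRef)
}
```

Because such a reference cannot point at an object in another cluster, multi-cluster deployments still need the explicit deletion path described above.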
1 parent f1e1c5a commit 3b35a2c

13 files changed: +178 -36 lines changed

api/v1/mdb/mongodb_types.go

Lines changed: 7 additions & 0 deletions

```diff
@@ -171,6 +171,13 @@ func (m *MongoDB) GetOwnerLabels() map[string]string {
 	}
 }
 
+// GetKind returns the Kind of the MongoDB resource. This is needed because
+// when objects are retrieved from the Kubernetes API, the TypeMeta
+// (which contains Kind and APIVersion) is not populated.
+func (m *MongoDB) GetKind() string {
+	return "MongoDB"
+}
+
 // GetSecretsMountedIntoDBPod returns a list of all the optional secret names that are used by this resource.
 func (m *MongoDB) GetSecretsMountedIntoDBPod() []string {
 	secrets := []string{}
```

api/v1/mdbmulti/mongodb_multi_types.go

Lines changed: 7 additions & 0 deletions

```diff
@@ -180,6 +180,13 @@ func (m *MongoDBMultiCluster) GetOwnerLabels() map[string]string {
 	}
 }
 
+// GetKind returns the Kind of the MongoDBMultiCluster resource. This is needed because
+// when objects are retrieved from the Kubernetes API, the TypeMeta
+// (which contains Kind and APIVersion) is not populated.
+func (m *MongoDBMultiCluster) GetKind() string {
+	return "MongoDBMultiCluster"
+}
+
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 type MongoDBMultiClusterList struct {
 	metav1.TypeMeta `json:",inline"`
```

api/v1/om/opsmanager_types.go

Lines changed: 7 additions & 0 deletions

```diff
@@ -349,6 +349,13 @@ func (om *MongoDBOpsManager) GetOwnerLabels() map[string]string {
 	}
 }
 
+// GetKind returns the Kind of the MongoDBOpsManager resource. This is needed because
+// when objects are retrieved from the Kubernetes API, the TypeMeta
+// (which contains Kind and APIVersion) is not populated.
+func (om *MongoDBOpsManager) GetKind() string {
+	return "MongoDBOpsManager"
+}
+
 // MongoDBOpsManagerServiceDefinition struct that defines the mechanism by which this Ops Manager resource
 // is exposed, via a Service, to the outside of the Kubernetes Cluster.
 type MongoDBOpsManagerServiceDefinition struct {
```

api/v1/owner.go

Lines changed: 4 additions & 0 deletions

```diff
@@ -14,4 +14,8 @@ type ResourceOwner interface {
 type ObjectOwner interface {
 	ResourceOwner
 	client.Object
+	// GetKind returns the Kind of the resource. This is needed because
+	// when objects are retrieved from the Kubernetes API, the TypeMeta
+	// (which contains Kind and APIVersion) is not populated.
+	GetKind() string
 }
```
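With `GetKind()` on the interface, any code holding an `ObjectOwner` can build a correct owner reference even though `TypeMeta` comes back empty from the API server. A hedged sketch of such a helper follows; the name `ownerReferenceFor` is made up for illustration, and the repository's actual helper (`kube.BaseOwnerReference`, used elsewhere in this commit) may be implemented differently.

```go
// ownerReferenceFor is a hypothetical helper, assumed to live alongside ObjectOwner in api/v1.
// Required imports:
//   metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//   "k8s.io/utils/ptr"
func ownerReferenceFor(owner ObjectOwner) []metav1.OwnerReference {
	return []metav1.OwnerReference{{
		APIVersion:         "mongodb.com/v1",
		Kind:               owner.GetKind(), // e.g. "MongoDB", "MongoDBOpsManager"; TypeMeta would be empty here
		Name:               owner.GetName(), // provided by client.Object
		UID:                owner.GetUID(),  // provided by client.Object
		Controller:         ptr.To(true),
		BlockOwnerDeletion: ptr.To(true),
	}}
}
```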
Lines changed: 7 additions & 0 deletions

```diff
@@ -0,0 +1,7 @@
+---
+kind: fix
+date: 2025-12-02
+---
+
+* **MongoDB** Adding missing ownerrefs to ensure proper resource deletion by kubernetes.
+* **Single Cluster** Deleting resources created by CRD now only happens on multi-cluster deployments. Single Cluster will solely rely on ownerrefs.
```

controllers/operator/appdbreplicaset_controller.go

Lines changed: 10 additions & 2 deletions

```diff
@@ -20,6 +20,7 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	apiErrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	mdbv1 "github.com/mongodb/mongodb-kubernetes/api/v1/mdb"
 	omv1 "github.com/mongodb/mongodb-kubernetes/api/v1/om"
@@ -121,15 +122,18 @@ type ReconcileAppDbReplicaSet struct {
 
 	imageUrls images.ImageUrls
 	initAppdbVersion string
+
+	ownerReferences []metav1.OwnerReference
 }
 
-func NewAppDBReplicaSetReconciler(ctx context.Context, imageUrls images.ImageUrls, initAppdbVersion string, appDBSpec omv1.AppDBSpec, commonController *ReconcileCommonController, omConnectionFactory om.ConnectionFactory, omAnnotations map[string]string, globalMemberClustersMap map[string]client.Client, log *zap.SugaredLogger) (*ReconcileAppDbReplicaSet, error) {
+func NewAppDBReplicaSetReconciler(ctx context.Context, imageUrls images.ImageUrls, initAppdbVersion string, appDBSpec omv1.AppDBSpec, commonController *ReconcileCommonController, omConnectionFactory om.ConnectionFactory, omAnnotations map[string]string, globalMemberClustersMap map[string]client.Client, log *zap.SugaredLogger, ownerReferences []metav1.OwnerReference) (*ReconcileAppDbReplicaSet, error) {
 	reconciler := &ReconcileAppDbReplicaSet{
 		ReconcileCommonController: commonController,
 		omConnectionFactory: omConnectionFactory,
 		centralClient: commonController.client,
 		imageUrls: imageUrls,
 		initAppdbVersion: initAppdbVersion,
+		ownerReferences: ownerReferences,
 	}
 
 	if err := reconciler.initializeStateStore(ctx, appDBSpec, omAnnotations, log); err != nil {
@@ -148,7 +152,7 @@ func NewAppDBReplicaSetReconciler(ctx context.Context, imageUrls images.ImageUrl
 func (r *ReconcileAppDbReplicaSet) initializeStateStore(ctx context.Context, appDBSpec omv1.AppDBSpec, omAnnotations map[string]string, log *zap.SugaredLogger) error {
 	r.deploymentState = NewAppDBDeploymentState()
 
-	r.stateStore = NewStateStore[AppDBDeploymentState](&appDBSpec, r.centralClient)
+	r.stateStore = NewStateStore[AppDBDeploymentState](&appDBSpec, r.ownerReferences, r.centralClient)
 	if state, err := r.stateStore.ReadState(ctx); err != nil {
 		if apiErrors.IsNotFound(err) {
 			// If the deployment state config map is missing, then it might be either:
@@ -286,6 +290,7 @@ func (r *ReconcileAppDbReplicaSet) writeLegacyStateConfigMaps(ctx context.Contex
 	mappingConfigMap := configmap.Builder().
 		SetName(spec.ClusterMappingConfigMapName()).
 		SetLabels(spec.GetOwnerLabels()).
+		SetOwnerReferences(r.ownerReferences).
 		SetNamespace(spec.Namespace).
 		SetData(mappingConfigMapData).
 		Build()
@@ -302,6 +307,7 @@ func (r *ReconcileAppDbReplicaSet) writeLegacyStateConfigMaps(ctx context.Contex
 	specConfigMap := configmap.Builder().
 		SetName(spec.LastAppliedMemberSpecConfigMapName()).
 		SetLabels(spec.GetOwnerLabels()).
+		SetOwnerReferences(r.ownerReferences).
 		SetNamespace(spec.Namespace).
 		SetData(specConfigMapData).
 		Build()
@@ -1732,6 +1738,7 @@ func (r *ReconcileAppDbReplicaSet) ensureProjectIDConfigMapForCluster(ctx contex
 	cm := configmap.Builder().
 		SetName(opsManager.Spec.AppDB.ProjectIDConfigMapName()).
 		SetLabels(opsManager.GetOwnerLabels()).
+		SetOwnerReferences(r.ownerReferences).
 		SetNamespace(opsManager.Namespace).
 		SetDataField(util.AppDbProjectIdKey, projectID).
 		Build()
@@ -1792,6 +1799,7 @@ func (r *ReconcileAppDbReplicaSet) publishACVersionAsConfigMap(ctx context.Conte
 
 	acVersionConfigMap := configmap.Builder().
 		SetLabels(labels).
+		SetOwnerReferences(r.ownerReferences).
 		SetNamespace(opsManager.Namespace).
 		SetName(cmName).
 		SetDataField(appDBACConfigMapVersionField, fmt.Sprintf("%d", version)).
```
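In case the wiring is not obvious from the hunks above: the owner references originate from the owning `MongoDBOpsManager` and are threaded through the AppDB reconciler into every ConfigMap it builds. A rough sketch of that flow under the signatures shown in this diff (not a complete reconciler; error handling trimmed):

```go
// Sketch: owner references are computed once from the owning CR and passed down,
// so every ConfigMap created by the AppDB reconciler is garbage-collected with it.
ownerRefs := kube.BaseOwnerReference(opsManager) // []metav1.OwnerReference, as used elsewhere in this PR

appDbReconciler, err := NewAppDBReplicaSetReconciler(
	ctx, imageUrls, initAppdbVersion, opsManager.Spec.AppDB,
	commonController, omConnectionFactory, opsManager.Annotations,
	memberClustersMap, log, ownerRefs,
)
if err != nil {
	return err
}
// Every configmap.Builder() call inside the reconciler now chains .SetOwnerReferences(r.ownerReferences).
_ = appDbReconciler
```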

controllers/operator/appdbreplicaset_controller_test.go

Lines changed: 2 additions & 2 deletions

```diff
@@ -1352,13 +1352,13 @@ func checkDeploymentEqualToPublished(t *testing.T, expected automationconfig.Aut
 
 func newAppDbReconciler(ctx context.Context, c client.Client, opsManager *omv1.MongoDBOpsManager, omConnectionFactoryFunc om.ConnectionFactory, log *zap.SugaredLogger) (*ReconcileAppDbReplicaSet, error) {
 	commonController := NewReconcileCommonController(ctx, c)
-	return NewAppDBReplicaSetReconciler(ctx, nil, "", opsManager.Spec.AppDB, commonController, omConnectionFactoryFunc, opsManager.Annotations, nil, zap.S())
+	return NewAppDBReplicaSetReconciler(ctx, nil, "", opsManager.Spec.AppDB, commonController, omConnectionFactoryFunc, opsManager.Annotations, nil, log, kube.BaseOwnerReference(opsManager))
 }
 
 func newAppDbMultiReconciler(ctx context.Context, c client.Client, opsManager *omv1.MongoDBOpsManager, memberClusterMap map[string]client.Client, log *zap.SugaredLogger, omConnectionFactoryFunc om.ConnectionFactory) (*ReconcileAppDbReplicaSet, error) {
 	_ = c.Update(ctx, opsManager)
 	commonController := NewReconcileCommonController(ctx, c)
-	return NewAppDBReplicaSetReconciler(ctx, nil, "", opsManager.Spec.AppDB, commonController, omConnectionFactoryFunc, opsManager.Annotations, memberClusterMap, log)
+	return NewAppDBReplicaSetReconciler(ctx, nil, "", opsManager.Spec.AppDB, commonController, omConnectionFactoryFunc, opsManager.Annotations, memberClusterMap, log, kube.BaseOwnerReference(opsManager))
 }
 
 func TestChangingFCVAppDB(t *testing.T) {
```

controllers/operator/create/create_test.go

Lines changed: 7 additions & 2 deletions

```diff
@@ -48,7 +48,7 @@ func TestBuildService(t *testing.T) {
 
 	assert.Len(t, svc.OwnerReferences, 1)
 	assert.Equal(t, mdb.Name, svc.OwnerReferences[0].Name)
-	assert.Equal(t, mdb.GetObjectKind().GroupVersionKind().Kind, svc.OwnerReferences[0].Kind)
+	assert.Equal(t, "MongoDB", svc.OwnerReferences[0].Kind)
 	assert.Equal(t, mock.TestNamespace, svc.Namespace)
 	assert.Equal(t, "my-svc", svc.Name)
 	assert.Equal(t, "loadbalancerip", svc.Spec.LoadBalancerIP)
@@ -67,7 +67,7 @@ func TestBuildService(t *testing.T) {
 
 	assert.Len(t, svc.OwnerReferences, 1)
 	assert.Equal(t, mdb.Name, svc.OwnerReferences[0].Name)
-	assert.Equal(t, mdb.GetObjectKind().GroupVersionKind().Kind, svc.OwnerReferences[0].Kind)
+	assert.Equal(t, "MongoDB", svc.OwnerReferences[0].Kind)
 	assert.Equal(t, mock.TestNamespace, svc.Namespace)
 	assert.Equal(t, "my-svc", svc.Name)
 	assert.Equal(t, "loadbalancerip", svc.Spec.LoadBalancerIP)
@@ -201,6 +201,7 @@ func TestOpsManagerInKubernetes_ClusterSpecificExternalConnectivity(t *testing.T
 			OwnerReferences: []metav1.OwnerReference{
 				{
 					APIVersion: "mongodb.com/v1",
+					Kind: "MongoDBOpsManager",
 					Name: "test-om",
 					Controller: ptr.To(true),
 					BlockOwnerDeletion: ptr.To(true),
@@ -242,6 +243,7 @@ func TestOpsManagerInKubernetes_ClusterSpecificExternalConnectivity(t *testing.T
 			OwnerReferences: []metav1.OwnerReference{
 				{
 					APIVersion: "mongodb.com/v1",
+					Kind: "MongoDBOpsManager",
 					Name: "test-om",
 					Controller: ptr.To(true),
 					BlockOwnerDeletion: ptr.To(true),
@@ -318,6 +320,7 @@ func TestOpsManagerInKubernetes_ClusterSpecificExternalConnectivity(t *testing.T
 			OwnerReferences: []metav1.OwnerReference{
 				{
 					APIVersion: "mongodb.com/v1",
+					Kind: "MongoDBOpsManager",
 					Name: "test-om",
 					Controller: ptr.To(true),
 					BlockOwnerDeletion: ptr.To(true),
@@ -359,6 +362,7 @@ func TestOpsManagerInKubernetes_ClusterSpecificExternalConnectivity(t *testing.T
 			OwnerReferences: []metav1.OwnerReference{
 				{
 					APIVersion: "mongodb.com/v1",
+					Kind: "MongoDBOpsManager",
 					Name: "test-om",
 					Controller: ptr.To(true),
 					BlockOwnerDeletion: ptr.To(true),
@@ -403,6 +407,7 @@ func TestOpsManagerInKubernetes_ClusterSpecificExternalConnectivity(t *testing.T
 			OwnerReferences: []metav1.OwnerReference{
 				{
 					APIVersion: "mongodb.com/v1",
+					Kind: "MongoDBOpsManager",
 					Name: "test-om",
 					Controller: ptr.To(true),
 					BlockOwnerDeletion: ptr.To(true),
```
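The switch from `mdb.GetObjectKind().GroupVersionKind().Kind` to the literal `"MongoDB"` follows from the same TypeMeta issue the new `GetKind()` methods address. A small hedged illustration of the behavior (the variable names `k8sClient` and `fetched` are made up; this is not code from the repository):

```go
// Illustration only: after a typed Get, TypeMeta (Kind/APIVersion) is generally left empty
// by client-go / controller-runtime, so deriving the Kind from the fetched object yields "".
fetched := &mdbv1.MongoDB{}
_ = k8sClient.Get(ctx, types.NamespacedName{Namespace: "my-namespace", Name: "my-replica-set"}, fetched)

fmt.Println(fetched.GetObjectKind().GroupVersionKind().Kind) // typically "" - not populated on Get
fmt.Println(fetched.GetKind())                               // "MongoDB" - reported explicitly by the new method
```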

controllers/operator/mongodbopsmanager_controller.go

Lines changed: 17 additions & 12 deletions

```diff
@@ -197,7 +197,7 @@ func NewOpsManagerReconcilerHelper(ctx context.Context, opsManagerReconciler *Op
 func (r *OpsManagerReconcilerHelper) initializeStateStore(ctx context.Context, reconciler *OpsManagerReconciler, opsManager *omv1.MongoDBOpsManager, globalMemberClustersMap map[string]client.Client, log *zap.SugaredLogger, clusterNamesFromClusterSpecList []string) error {
 	r.deploymentState = NewOMDeploymentState()
 
-	r.stateStore = NewStateStore[OMDeploymentState](opsManager, reconciler.client)
+	r.stateStore = NewStateStore[OMDeploymentState](opsManager, kube.BaseOwnerReference(opsManager), reconciler.client)
 	if err := r.stateStore.read(ctx); err != nil {
 		if apiErrors.IsNotFound(err) {
 			// If the deployment state config map is missing, then it might be either:
@@ -253,6 +253,7 @@ func (r *OpsManagerReconcilerHelper) writeLegacyStateConfigMap(ctx context.Conte
 	mappingConfigMap := configmap.Builder().
 		SetName(spec.ClusterMappingConfigMapName()).
 		SetLabels(spec.GetOwnerLabels()).
+		SetOwnerReferences(kube.BaseOwnerReference(r.opsManager)).
 		SetNamespace(spec.Namespace).
 		SetData(mappingConfigMapData).
 		Build()
@@ -2128,27 +2129,31 @@ func (r *OpsManagerReconciler) OnDelete(ctx context.Context, obj interface{}, lo
 		return
 	}
 
-	// delete the OpsManager resources from each of the member cluster. We need to delete the
-	// resource explicitly in case of multi-cluster because we can't set owner reference cross cluster
-	for _, memberCluster := range helper.getHealthyMemberClusters() {
-		if err := r.deleteClusterResources(ctx, memberCluster.Client, memberCluster.Name, opsManager, log); err != nil {
-			log.Warnf("Failed to delete dependant OpsManager resources in cluster %s: %s", memberCluster.Name, err)
+	// Delete resources explicitly only in multi-cluster mode where we can't set owner references cross cluster.
+	// In single-cluster deployments, OwnerReferences handle cleanup automatically via Kubernetes garbage collection.
+	if opsManager.Spec.IsMultiCluster() {
+		for _, memberCluster := range helper.getHealthyMemberClusters() {
+			if err := r.deleteClusterResources(ctx, memberCluster.Client, memberCluster.Name, opsManager, log); err != nil {
+				log.Warnf("Failed to delete dependant OpsManager resources in cluster %s: %s", memberCluster.Name, err)
+			}
 		}
 	}
 
-	// delete the AppDB resources from each of the member cluster. We need to delete the
-	// resource explicitly in case of multi-cluster because we can't set owner reference cross cluster
-	for _, memberCluster := range appDbReconciler.GetHealthyMemberClusters() {
-		if err := r.deleteClusterResources(ctx, memberCluster.Client, memberCluster.Name, opsManager, log); err != nil {
-			log.Warnf("Failed to delete dependant AppDB resources in cluster %s: %s", memberCluster.Name, err.Error())
+	if opsManager.Spec.AppDB.IsMultiCluster() {
+		for _, memberCluster := range appDbReconciler.GetHealthyMemberClusters() {
+			if err := r.deleteClusterResources(ctx, memberCluster.Client, memberCluster.Name, opsManager, log); err != nil {
+				log.Warnf("Failed to delete dependant AppDB resources in cluster %s: %s", memberCluster.Name, err.Error())
+			}
 		}
 	}
 
+	r.resourceWatcher.RemoveDependentWatchedResources(opsManager.ObjectKey())
+
 	log.Info("Cleaned up Ops Manager related resources.")
 }
 
 func (r *OpsManagerReconciler) createNewAppDBReconciler(ctx context.Context, opsManager *omv1.MongoDBOpsManager, log *zap.SugaredLogger) (*ReconcileAppDbReplicaSet, error) {
-	return NewAppDBReplicaSetReconciler(ctx, r.imageUrls, r.initAppdbVersion, opsManager.Spec.AppDB, r.ReconcileCommonController, r.omConnectionFactory, opsManager.Annotations, r.memberClustersMap, log)
+	return NewAppDBReplicaSetReconciler(ctx, r.imageUrls, r.initAppdbVersion, opsManager.Spec.AppDB, r.ReconcileCommonController, r.omConnectionFactory, opsManager.Annotations, r.memberClustersMap, log, kube.BaseOwnerReference(opsManager))
 }
 
 // getAnnotationsForOpsManagerResource returns all the annotations that should be applied to the resource
```

controllers/operator/mongodbopsmanager_controller_test.go

Lines changed: 81 additions & 0 deletions

```diff
@@ -219,6 +219,87 @@ func TestOpsManagerReconciler_removeWatchedResources(t *testing.T) {
 	assert.Zero(t, len(reconciler.resourceWatcher.GetWatchedResources()))
 }
 
+// TestOpsManagerReconciler_OnDeleteClusterResourceCleanup verifies the DeleteAllOf behavior:
+// - In single-cluster mode: DeleteAllOf should NOT be called (OwnerReferences handle cleanup)
+// - In multi-cluster mode: DeleteAllOf SHOULD be called (can't use OwnerReferences cross-cluster)
+func TestOpsManagerReconciler_OnDeleteClusterResourceCleanup(t *testing.T) {
+	multiClusterSpecItems := []omv1.ClusterSpecOMItem{
+		{ClusterName: "cluster-a", Members: 1},
+		{ClusterName: "cluster-b", Members: 1},
+	}
+
+	testCases := []struct {
+		name string
+		isMultiCluster bool
+		clusterSpecItems []omv1.ClusterSpecOMItem
+	}{
+		{
+			name: "SingleCluster_SkipsDeleteAllOf",
+			isMultiCluster: false,
+			clusterSpecItems: nil,
+		},
+		{
+			name: "MultiCluster_CallsDeleteAllOf",
+			isMultiCluster: true,
+			clusterSpecItems: multiClusterSpecItems,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			ctx := context.Background()
+			deleteAllOfCallCount := 0
+			omConnectionFactory := om.NewDefaultCachedOMConnectionFactory()
+
+			omBuilder := DefaultOpsManagerBuilder()
+			var memberClustersMap map[string]client.Client
+
+			// Create fake client with interceptor that tracks DeleteAllOf calls
+			deleteAllOfInterceptor := func(ctx context.Context, client client.WithWatch, obj client.Object, opts ...client.DeleteAllOfOption) error {
+				deleteAllOfCallCount++
+				return nil
+			}
+
+			if tc.isMultiCluster {
+				omBuilder.SetOpsManagerTopology(omv1.ClusterTopologyMultiCluster).
+					SetOpsManagerClusterSpecList(tc.clusterSpecItems)
+				memberClustersMap = make(map[string]client.Client)
+				for _, clusterSpec := range tc.clusterSpecItems {
+					memberFakeClientBuilder := mock.NewEmptyFakeClientBuilder()
+					memberFakeClientBuilder.WithInterceptorFuncs(interceptor.Funcs{
+						Get: mock.GetFakeClientInterceptorGetFunc(omConnectionFactory, true, true),
+						DeleteAllOf: deleteAllOfInterceptor,
+					})
+					memberClustersMap[clusterSpec.ClusterName] = memberFakeClientBuilder.Build()
+				}
+			}
+
+			testOm := omBuilder.Build()
+			assert.Equal(t, tc.isMultiCluster, testOm.Spec.IsMultiCluster(), "topology mismatch")
+
+			fakeClientBuilder := mock.NewEmptyFakeClientBuilder()
+			fakeClientBuilder.WithObjects(testOm.DeepCopy())
+			fakeClientBuilder.WithInterceptorFuncs(interceptor.Funcs{
+				Get: mock.GetFakeClientInterceptorGetFunc(omConnectionFactory, true, true),
+				DeleteAllOf: deleteAllOfInterceptor,
+			})
+			kubeClient := kubernetesClient.NewClient(fakeClientBuilder.Build())
+
+			reconciler := NewOpsManagerReconciler(ctx, kubeClient, memberClustersMap, images.ImageUrls{}, "", "", omConnectionFactory.GetConnectionFunc, &MockedInitializer{expectedOmURL: testOm.CentralURL(), t: t}, func(baseUrl string, user string, publicApiKey string, ca *string) api.OpsManagerAdmin {
+				return api.NewMockedAdminProvider(baseUrl, user, publicApiKey, true).(*api.MockedOmAdmin)
+			})
+
+			reconciler.OnDelete(ctx, testOm, zap.S())
+
+			if tc.isMultiCluster {
+				assert.Greater(t, deleteAllOfCallCount, 0, "DeleteAllOf should be called in multi-cluster mode")
+			} else {
+				assert.Equal(t, 0, deleteAllOfCallCount, "DeleteAllOf should not be called in single-cluster mode; cleanup relies on OwnerReferences")
+			}
+		})
+	}
+}
+
 func TestOpsManagerReconciler_prepareOpsManager(t *testing.T) {
 	ctx := context.Background()
 	testOm := DefaultOpsManagerBuilder().Build()
```
