From e6a32e82620fc929cd7d95ef716ab1d5ef237cfd Mon Sep 17 00:00:00 2001 From: sangitaray2021 Date: Fri, 20 Sep 2024 12:55:20 +0530 Subject: [PATCH] refactored tracker execution and caller Signed-off-by: sangitaray2021 --- pkg/cmd/server/server.go | 3 - pkg/controller/restore_controller.go | 23 +++---- pkg/restore/request.go | 26 ++++---- pkg/restore/restore.go | 16 ++--- pkg/util/kube/utils.go | 97 +++++++++++++++------------- pkg/util/kube/utils_test.go | 13 ++++ 6 files changed, 97 insertions(+), 81 deletions(-) diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go index edc561708a..a697e4d01b 100644 --- a/pkg/cmd/server/server.go +++ b/pkg/cmd/server/server.go @@ -82,7 +82,6 @@ import ( "github.com/vmware-tanzu/velero/pkg/restore" "github.com/vmware-tanzu/velero/pkg/uploader" "github.com/vmware-tanzu/velero/pkg/util/filesystem" - "github.com/vmware-tanzu/velero/pkg/util/kube" "github.com/vmware-tanzu/velero/pkg/util/logging" ) @@ -775,7 +774,6 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string } multiHookTracker := hook.NewMultiHookTracker() - namespaceDeletionStatusTracker := kube.NewNamespaceDeletionStatusTracker() if _, ok := enabledRuntimeControllers[constant.ControllerRestore]; ok { restorer, err := restore.NewKubernetesRestorer( @@ -800,7 +798,6 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string s.credentialFileStore, s.mgr.GetClient(), multiHookTracker, - namespaceDeletionStatusTracker, ) cmd.CheckError(err) diff --git a/pkg/controller/restore_controller.go b/pkg/controller/restore_controller.go index aa55715c2c..4cd5e39ffb 100644 --- a/pkg/controller/restore_controller.go +++ b/pkg/controller/restore_controller.go @@ -559,17 +559,18 @@ func (r *restoreReconciler) runValidatedRestore(restore *api.Restore, info backu } restoreReq := &pkgrestore.Request{ - Log: restoreLog, - Restore: restore, - Backup: info.backup, - PodVolumeBackups: podVolumeBackups, - VolumeSnapshots: volumeSnapshots, - BackupReader: backupFile, - ResourceModifiers: resourceModifiers, - DisableInformerCache: r.disableInformerCache, - CSIVolumeSnapshots: csiVolumeSnapshots, - BackupVolumeInfoMap: backupVolumeInfoMap, - RestoreVolumeInfoTracker: volume.NewRestoreVolInfoTracker(restore, restoreLog, r.globalCrClient), + Log: restoreLog, + Restore: restore, + Backup: info.backup, + PodVolumeBackups: podVolumeBackups, + VolumeSnapshots: volumeSnapshots, + BackupReader: backupFile, + ResourceModifiers: resourceModifiers, + DisableInformerCache: r.disableInformerCache, + CSIVolumeSnapshots: csiVolumeSnapshots, + BackupVolumeInfoMap: backupVolumeInfoMap, + RestoreVolumeInfoTracker: volume.NewRestoreVolInfoTracker(restore, restoreLog, r.globalCrClient), + NamespaceDeletionStatusTracker: kubeutil.NewNamespaceDeletionStatusTracker(), } restoreWarnings, restoreErrors := r.restorer.RestoreWithResolvers(restoreReq, actionsResolver, pluginManager) diff --git a/pkg/restore/request.go b/pkg/restore/request.go index d6e341c4ff..d2f652f793 100644 --- a/pkg/restore/request.go +++ b/pkg/restore/request.go @@ -29,6 +29,7 @@ import ( "github.com/vmware-tanzu/velero/internal/volume" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/itemoperation" + "github.com/vmware-tanzu/velero/pkg/util/kube" ) const ( @@ -52,18 +53,19 @@ func resourceKey(obj runtime.Object) string { type Request struct { *velerov1api.Restore - Log logrus.FieldLogger - Backup *velerov1api.Backup - PodVolumeBackups []*velerov1api.PodVolumeBackup 
-	VolumeSnapshots          []*volume.Snapshot
-	BackupReader             io.Reader
-	RestoredItems            map[itemKey]restoredItemStatus
-	itemOperationsList       *[]*itemoperation.RestoreOperation
-	ResourceModifiers        *resourcemodifiers.ResourceModifiers
-	DisableInformerCache     bool
-	CSIVolumeSnapshots       []*snapshotv1api.VolumeSnapshot
-	BackupVolumeInfoMap      map[string]volume.BackupVolumeInfo
-	RestoreVolumeInfoTracker *volume.RestoreVolumeInfoTracker
+	Log                            logrus.FieldLogger
+	Backup                         *velerov1api.Backup
+	PodVolumeBackups               []*velerov1api.PodVolumeBackup
+	VolumeSnapshots                []*volume.Snapshot
+	BackupReader                   io.Reader
+	RestoredItems                  map[itemKey]restoredItemStatus
+	itemOperationsList             *[]*itemoperation.RestoreOperation
+	ResourceModifiers              *resourcemodifiers.ResourceModifiers
+	DisableInformerCache           bool
+	CSIVolumeSnapshots             []*snapshotv1api.VolumeSnapshot
+	BackupVolumeInfoMap            map[string]volume.BackupVolumeInfo
+	RestoreVolumeInfoTracker       *volume.RestoreVolumeInfoTracker
+	NamespaceDeletionStatusTracker kube.NamespaceDeletionStatusTracker
 }
 
 type restoredItemStatus struct {
diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go
index c7164624a3..2717f420d5 100644
--- a/pkg/restore/restore.go
+++ b/pkg/restore/restore.go
@@ -137,7 +137,6 @@ func NewKubernetesRestorer(
 	credentialStore credentials.FileStore,
 	kbClient crclient.Client,
 	multiHookTracker *hook.MultiHookTracker,
-	namespaceDeletionStatusTracker kube.NamespaceDeletionStatusTracker,
 ) (Restorer, error) {
 	return &kubernetesRestorer{
 		discoveryHelper: discoveryHelper,
@@ -157,13 +156,12 @@ func NewKubernetesRestorer(
 			veleroCloneName := "velero-clone-" + veleroCloneUUID.String()
 			return veleroCloneName, nil
 		},
-		fileSystem:                     filesystem.NewFileSystem(),
-		podCommandExecutor:             podCommandExecutor,
-		podGetter:                      podGetter,
-		credentialFileStore:            credentialStore,
-		kbClient:                       kbClient,
-		multiHookTracker:               multiHookTracker,
-		namespaceDeletionStatusTracker: namespaceDeletionStatusTracker,
+		fileSystem:          filesystem.NewFileSystem(),
+		podCommandExecutor:  podCommandExecutor,
+		podGetter:           podGetter,
+		credentialFileStore: credentialStore,
+		kbClient:            kbClient,
+		multiHookTracker:    multiHookTracker,
 	}, nil
 }
 
@@ -326,7 +324,7 @@ func (kr *kubernetesRestorer) RestoreWithResolvers(
 		backupVolumeInfoMap:      req.BackupVolumeInfoMap,
 		restoreVolumeInfoTracker: req.RestoreVolumeInfoTracker,
 		hooksWaitExecutor:        hooksWaitExecutor,
-		namespaceDeletionStatusTracker: kr.namespaceDeletionStatusTracker,
+		namespaceDeletionStatusTracker: req.NamespaceDeletionStatusTracker,
 	}
 
 	return restoreCtx.execute()
diff --git a/pkg/util/kube/utils.go b/pkg/util/kube/utils.go
index 920daf8049..0ecd30aeac 100644
--- a/pkg/util/kube/utils.go
+++ b/pkg/util/kube/utils.go
@@ -80,63 +80,68 @@ func EnsureNamespaceExistsAndIsReady(namespace *corev1api.Namespace, client core
 	// if namespace is marked for deletion, and we timed out, report an error
 	var terminatingNamespace bool
 
-	// Check if the namespace is already in the process of being deleted
-	if namespaceDeletionStatusTracker.Contains(namespace.Name, namespace.Name) {
-		return false, nsCreated, errors.Errorf("namespace %s is already present in the polling set to skipping", namespace.Name)
-	} else {
-
-		// Added the namespace to the polling tracker set
-		namespaceDeletionStatusTracker.Add(namespace.Name, namespace.Name)
-		err = wait.PollUntilContextTimeout(context.Background(), time.Second, timeout, true, func(ctx context.Context) (bool, error) {
-			clusterNS, err := client.Get(ctx, namespace.Name, metav1.GetOptions{})
-			// if namespace is marked for deletion, and we timed out, report an error
-
-			if apierrors.IsNotFound(err) {
-				// Namespace isn't in cluster, we're good to create.
-				return true, nil
-			}
-
-			if err != nil {
-				// Return the err and exit the loop.
-				return true, err
-			}
+	var namespaceAlreadyInDeletionTracker bool
 
-			if clusterNS != nil && (clusterNS.GetDeletionTimestamp() != nil || clusterNS.Status.Phase == corev1api.NamespaceTerminating) {
-				// Marked for deletion, keep waiting
-				terminatingNamespace = true
-				return false, nil
-			}
+	err = wait.PollUntilContextTimeout(context.Background(), time.Second, timeout, true, func(ctx context.Context) (bool, error) {
+		clusterNS, err := client.Get(ctx, namespace.Name, metav1.GetOptions{})
+		// if namespace is marked for deletion, and we timed out, report an error
 
-			// clusterNS found, is not nil, and not marked for deletion, therefore we shouldn't create it.
-			ready = true
+		if apierrors.IsNotFound(err) {
+			// Namespace isn't in cluster, we're good to create.
 			return true, nil
-		})
+		}
 
-		namespaceDeletionStatusTracker.Delete(namespace.Name, namespace.Name)
-		// err will be set if we timed out or encountered issues retrieving the namespace,
 		if err != nil {
-			if terminatingNamespace {
-				return false, nsCreated, errors.Wrapf(err, "timed out waiting for terminating namespace %s to disappear before restoring", namespace.Name)
+			// Return the err and exit the loop.
+			return true, err
+		}
+
+		if clusterNS != nil && (clusterNS.GetDeletionTimestamp() != nil || clusterNS.Status.Phase == corev1api.NamespaceTerminating) {
+
+			if namespaceDeletionStatusTracker.Contains(clusterNS.Name, clusterNS.Name) {
+				namespaceAlreadyInDeletionTracker = true
+				return true, errors.Errorf("namespace %s is already present in the polling set, skipping execution", namespace.Name)
 			}
-			return false, nsCreated, errors.Wrapf(err, "error getting namespace %s", namespace.Name)
+
+			// Marked for deletion, keep waiting
+			terminatingNamespace = true
+			return false, nil
 		}
 
-		// In the case the namespace already exists and isn't marked for deletion, assume it's ready for use.
-		if ready {
-			return true, nsCreated, nil
+		// clusterNS found, is not nil, and not marked for deletion, therefore we shouldn't create it.
+		ready = true
+		return true, nil
+	})
+
+	// err will be set if we timed out or encountered issues retrieving the namespace.
+	if err != nil {
+		if terminatingNamespace {
+			// If the namespace is marked for deletion and we timed out, add it to the tracker.
+			namespaceDeletionStatusTracker.Add(namespace.Name, namespace.Name)
+			return false, nsCreated, errors.Wrapf(err, "timed out waiting for terminating namespace %s to disappear before restoring", namespace.Name)
+
+		} else if namespaceAlreadyInDeletionTracker {
+			// If the namespace is already in the tracker, return an error.
+ return false, nsCreated, err } + return false, nsCreated, errors.Wrapf(err, "error getting namespace %s", namespace.Name) + } - clusterNS, err := client.Create(context.TODO(), namespace, metav1.CreateOptions{}) - if apierrors.IsAlreadyExists(err) { - if clusterNS != nil && (clusterNS.GetDeletionTimestamp() != nil || clusterNS.Status.Phase == corev1api.NamespaceTerminating) { - // Somehow created after all our polling and marked for deletion, return an error - return false, nsCreated, errors.Errorf("namespace %s created and marked for termination after timeout", namespace.Name) - } - } else if err != nil { - return false, nsCreated, errors.Wrapf(err, "error creating namespace %s", namespace.Name) - } else { - nsCreated = true + // In the case the namespace already exists and isn't marked for deletion, assume it's ready for use. + if ready { + return true, nsCreated, nil + } + + clusterNS, err := client.Create(context.TODO(), namespace, metav1.CreateOptions{}) + if apierrors.IsAlreadyExists(err) { + if clusterNS != nil && (clusterNS.GetDeletionTimestamp() != nil || clusterNS.Status.Phase == corev1api.NamespaceTerminating) { + // Somehow created after all our polling and marked for deletion, return an error + return false, nsCreated, errors.Errorf("namespace %s created and marked for termination after timeout", namespace.Name) } + } else if err != nil { + return false, nsCreated, errors.Wrapf(err, "error creating namespace %s", namespace.Name) + } else { + nsCreated = true } // The namespace created successfully diff --git a/pkg/util/kube/utils_test.go b/pkg/util/kube/utils_test.go index 4bc77e3bf7..9a4bd5e135 100644 --- a/pkg/util/kube/utils_test.go +++ b/pkg/util/kube/utils_test.go @@ -55,6 +55,7 @@ func TestEnsureNamespaceExistsAndIsReady(t *testing.T) { alreadyExists bool expectedResult bool expectedCreatedResult bool + nsAlreadyInTerminationTracker bool namespaceDeletionStatusTracker NamespaceDeletionStatusTracker }{ { @@ -96,6 +97,14 @@ func TestEnsureNamespaceExistsAndIsReady(t *testing.T) { expectedResult: false, expectedCreatedResult: false, }, + { + name: "same namespace found earlier, terminating phase already tracked", + expectNSFound: true, + nsPhase: corev1.NamespaceTerminating, + expectedResult: false, + expectedCreatedResult: false, + nsAlreadyInTerminationTracker: true, + }, } namespaceDeletionStatusTracker := NewNamespaceDeletionStatusTracker() @@ -134,6 +143,10 @@ func TestEnsureNamespaceExistsAndIsReady(t *testing.T) { nsClient.On("Create", namespace).Return(namespace, nil) } + if test.nsAlreadyInTerminationTracker { + namespaceDeletionStatusTracker.Add("test", "test") + } + result, nsCreated, _ := EnsureNamespaceExistsAndIsReady(namespace, nsClient, timeout, namespaceDeletionStatusTracker) assert.Equal(t, test.expectedResult, result)
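
Reviewer note: the net effect of this patch is that the namespace deletion-status tracker is no longer a server-lifetime singleton passed into NewKubernetesRestorer; the restore controller builds a fresh tracker per restore request, and EnsureNamespaceExistsAndIsReady records a namespace in the tracker only after polling times out on a terminating namespace, then short-circuits the next attempt. Below is a minimal sketch of that intended behavior (a toy tracker and a hypothetical ensureNamespace stand-in, not Velero's actual implementation):

package main

import (
	"errors"
	"fmt"
)

// Toy stand-in for kube.NamespaceDeletionStatusTracker; the real tracker is
// keyed by (restore, namespace) pairs, simplified here to a single string key.
type tracker map[string]struct{}

func (t tracker) Contains(ns string) bool { _, ok := t[ns]; return ok }
func (t tracker) Add(ns string)           { t[ns] = struct{}{} }

// Hypothetical stand-in for EnsureNamespaceExistsAndIsReady; terminating=true
// models a namespace stuck in the Terminating phase until the poll times out.
func ensureNamespace(ns string, terminating bool, t tracker) (ready bool, err error) {
	if !terminating {
		return true, nil
	}
	if t.Contains(ns) {
		// Second and later attempts short-circuit instead of polling again.
		return false, fmt.Errorf("namespace %s is already present in the polling set, skipping execution", ns)
	}
	// First attempt polls, times out, and only then records the namespace.
	t.Add(ns)
	return false, errors.New("timed out waiting for terminating namespace " + ns)
}

func main() {
	t := tracker{} // one tracker per restore request, as the controller now builds
	_, err := ensureNamespace("app", true, t)
	fmt.Println(err) // first call: times out, namespace is now tracked
	_, err = ensureNamespace("app", true, t)
	fmt.Println(err) // second call: short-circuits via the tracker
}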