refactored tracker execution and caller
Signed-off-by: sangitaray2021 <[email protected]>
sangitaray2021 committed Sep 23, 2024
1 parent 755bbd8 commit e6a32e8
Showing 6 changed files with 97 additions and 81 deletions.
3 changes: 0 additions & 3 deletions pkg/cmd/server/server.go
@@ -82,7 +82,6 @@ import (
"github.com/vmware-tanzu/velero/pkg/restore"
"github.com/vmware-tanzu/velero/pkg/uploader"
"github.com/vmware-tanzu/velero/pkg/util/filesystem"
"github.com/vmware-tanzu/velero/pkg/util/kube"
"github.com/vmware-tanzu/velero/pkg/util/logging"
)

@@ -775,7 +774,6 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
}

multiHookTracker := hook.NewMultiHookTracker()
namespaceDeletionStatusTracker := kube.NewNamespaceDeletionStatusTracker()

if _, ok := enabledRuntimeControllers[constant.ControllerRestore]; ok {
restorer, err := restore.NewKubernetesRestorer(
@@ -800,7 +798,6 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
s.credentialFileStore,
s.mgr.GetClient(),
multiHookTracker,
namespaceDeletionStatusTracker,
)

cmd.CheckError(err)
23 changes: 12 additions & 11 deletions pkg/controller/restore_controller.go
@@ -559,17 +559,18 @@ func (r *restoreReconciler) runValidatedRestore(restore *api.Restore, info backu
}

restoreReq := &pkgrestore.Request{
Log: restoreLog,
Restore: restore,
Backup: info.backup,
PodVolumeBackups: podVolumeBackups,
VolumeSnapshots: volumeSnapshots,
BackupReader: backupFile,
ResourceModifiers: resourceModifiers,
DisableInformerCache: r.disableInformerCache,
CSIVolumeSnapshots: csiVolumeSnapshots,
BackupVolumeInfoMap: backupVolumeInfoMap,
RestoreVolumeInfoTracker: volume.NewRestoreVolInfoTracker(restore, restoreLog, r.globalCrClient),
Log: restoreLog,
Restore: restore,
Backup: info.backup,
PodVolumeBackups: podVolumeBackups,
VolumeSnapshots: volumeSnapshots,
BackupReader: backupFile,
ResourceModifiers: resourceModifiers,
DisableInformerCache: r.disableInformerCache,
CSIVolumeSnapshots: csiVolumeSnapshots,
BackupVolumeInfoMap: backupVolumeInfoMap,
RestoreVolumeInfoTracker: volume.NewRestoreVolInfoTracker(restore, restoreLog, r.globalCrClient),
NamespaceDeletionStatusTracker: kubeutil.NewNamespaceDeletionStatusTracker(),
}
restoreWarnings, restoreErrors := r.restorer.RestoreWithResolvers(restoreReq, actionsResolver, pluginManager)

26 changes: 14 additions & 12 deletions pkg/restore/request.go
@@ -29,6 +29,7 @@ import (
"github.com/vmware-tanzu/velero/internal/volume"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/itemoperation"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)

const (
@@ -52,18 +53,19 @@ func resourceKey(obj runtime.Object) string {
type Request struct {
*velerov1api.Restore

Log logrus.FieldLogger
Backup *velerov1api.Backup
PodVolumeBackups []*velerov1api.PodVolumeBackup
VolumeSnapshots []*volume.Snapshot
BackupReader io.Reader
RestoredItems map[itemKey]restoredItemStatus
itemOperationsList *[]*itemoperation.RestoreOperation
ResourceModifiers *resourcemodifiers.ResourceModifiers
DisableInformerCache bool
CSIVolumeSnapshots []*snapshotv1api.VolumeSnapshot
BackupVolumeInfoMap map[string]volume.BackupVolumeInfo
RestoreVolumeInfoTracker *volume.RestoreVolumeInfoTracker
Log logrus.FieldLogger
Backup *velerov1api.Backup
PodVolumeBackups []*velerov1api.PodVolumeBackup
VolumeSnapshots []*volume.Snapshot
BackupReader io.Reader
RestoredItems map[itemKey]restoredItemStatus
itemOperationsList *[]*itemoperation.RestoreOperation
ResourceModifiers *resourcemodifiers.ResourceModifiers
DisableInformerCache bool
CSIVolumeSnapshots []*snapshotv1api.VolumeSnapshot
BackupVolumeInfoMap map[string]volume.BackupVolumeInfo
RestoreVolumeInfoTracker *volume.RestoreVolumeInfoTracker
NamespaceDeletionStatusTracker kube.NamespaceDeletionStatusTracker
}

type restoredItemStatus struct {
16 changes: 7 additions & 9 deletions pkg/restore/restore.go
@@ -137,7 +137,6 @@ func NewKubernetesRestorer(
credentialStore credentials.FileStore,
kbClient crclient.Client,
multiHookTracker *hook.MultiHookTracker,
namespaceDeletionStatusTracker kube.NamespaceDeletionStatusTracker,
) (Restorer, error) {
return &kubernetesRestorer{
discoveryHelper: discoveryHelper,
@@ -157,13 +156,12 @@
veleroCloneName := "velero-clone-" + veleroCloneUUID.String()
return veleroCloneName, nil
},
fileSystem: filesystem.NewFileSystem(),
podCommandExecutor: podCommandExecutor,
podGetter: podGetter,
credentialFileStore: credentialStore,
kbClient: kbClient,
multiHookTracker: multiHookTracker,
namespaceDeletionStatusTracker: namespaceDeletionStatusTracker,
fileSystem: filesystem.NewFileSystem(),
podCommandExecutor: podCommandExecutor,
podGetter: podGetter,
credentialFileStore: credentialStore,
kbClient: kbClient,
multiHookTracker: multiHookTracker,
}, nil
}

@@ -326,7 +324,7 @@ func (kr *kubernetesRestorer) RestoreWithResolvers(
backupVolumeInfoMap: req.BackupVolumeInfoMap,
restoreVolumeInfoTracker: req.RestoreVolumeInfoTracker,
hooksWaitExecutor: hooksWaitExecutor,
namespaceDeletionStatusTracker: kr.namespaceDeletionStatusTracker,
namespaceDeletionStatusTracker: req.NamespaceDeletionStatusTracker,
}

return restoreCtx.execute()
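Taken together, the controller, request, and restorer changes above move NamespaceDeletionStatusTracker construction out of server startup: runValidatedRestore now builds a fresh tracker per restore and hands it over on the pkgrestore.Request, and RestoreWithResolvers reads it from the request instead of from a field injected into kubernetesRestorer. The following is a minimal, self-contained Go sketch of that wiring; the types and the single-argument Add/Contains signatures are simplified stand-ins for illustration, not the real Velero interfaces.

```go
package main

import "fmt"

// Stand-in for kube.NamespaceDeletionStatusTracker: a set of namespaces that
// were seen stuck in termination. The real interface is keyed differently;
// this single-key version is only illustrative.
type deletionStatusTracker map[string]bool

func newDeletionStatusTracker() deletionStatusTracker   { return deletionStatusTracker{} }
func (t deletionStatusTracker) Add(ns string)            { t[ns] = true }
func (t deletionStatusTracker) Contains(ns string) bool  { return t[ns] }

// Stand-in for pkgrestore.Request: after this commit it carries the tracker,
// so the restorer no longer needs one injected when it is constructed.
type request struct {
	restoreName                    string
	namespaceDeletionStatusTracker deletionStatusTracker
}

// Controller side (runValidatedRestore): build a fresh tracker per restore.
func newRequest(name string) *request {
	return &request{
		restoreName:                    name,
		namespaceDeletionStatusTracker: newDeletionStatusTracker(),
	}
}

// Restorer side (RestoreWithResolvers): read the tracker from the request
// rather than from a field on kubernetesRestorer.
func restoreWithResolvers(req *request) {
	fmt.Printf("restore %q sees tracked namespace ns-1: %v\n",
		req.restoreName, req.namespaceDeletionStatusTracker.Contains("ns-1"))
}

func main() {
	req := newRequest("restore-1")
	req.namespaceDeletionStatusTracker.Add("ns-1")
	restoreWithResolvers(req)
}
```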
97 changes: 51 additions & 46 deletions pkg/util/kube/utils.go
@@ -80,63 +80,68 @@ func EnsureNamespaceExistsAndIsReady(namespace *corev1api.Namespace, client core
// if namespace is marked for deletion, and we timed out, report an error
var terminatingNamespace bool

// Check if the namespace is already in the process of being deleted
if namespaceDeletionStatusTracker.Contains(namespace.Name, namespace.Name) {
return false, nsCreated, errors.Errorf("namespace %s is already present in the polling set to skipping", namespace.Name)
} else {

// Added the namespace to the polling tracker set
namespaceDeletionStatusTracker.Add(namespace.Name, namespace.Name)
err = wait.PollUntilContextTimeout(context.Background(), time.Second, timeout, true, func(ctx context.Context) (bool, error) {
clusterNS, err := client.Get(ctx, namespace.Name, metav1.GetOptions{})
// if namespace is marked for deletion, and we timed out, report an error

if apierrors.IsNotFound(err) {
// Namespace isn't in cluster, we're good to create.
return true, nil
}

if err != nil {
// Return the err and exit the loop.
return true, err
}
var namespaceAlreadyInDeletionTracker bool

if clusterNS != nil && (clusterNS.GetDeletionTimestamp() != nil || clusterNS.Status.Phase == corev1api.NamespaceTerminating) {
// Marked for deletion, keep waiting
terminatingNamespace = true
return false, nil
}
err = wait.PollUntilContextTimeout(context.Background(), time.Second, timeout, true, func(ctx context.Context) (bool, error) {
clusterNS, err := client.Get(ctx, namespace.Name, metav1.GetOptions{})
// if namespace is marked for deletion, and we timed out, report an error

// clusterNS found, is not nil, and not marked for deletion, therefore we shouldn't create it.
ready = true
if apierrors.IsNotFound(err) {
// Namespace isn't in cluster, we're good to create.
return true, nil
})
}

namespaceDeletionStatusTracker.Delete(namespace.Name, namespace.Name)
// err will be set if we timed out or encountered issues retrieving the namespace,
if err != nil {
if terminatingNamespace {
return false, nsCreated, errors.Wrapf(err, "timed out waiting for terminating namespace %s to disappear before restoring", namespace.Name)
// Return the err and exit the loop.
return true, err
}

if clusterNS != nil && (clusterNS.GetDeletionTimestamp() != nil || clusterNS.Status.Phase == corev1api.NamespaceTerminating) {

[GitHub Actions / Run Linter Check failure on line 99 in pkg/util/kube/utils.go: unnecessary leading newline (whitespace)]
if namespaceDeletionStatusTracker.Contains(clusterNS.Name, clusterNS.Name) {
namespaceAlreadyInDeletionTracker = true
return true, errors.Errorf("namespace %s is already present in the polling set, skipping execution", namespace.Name)
}
return false, nsCreated, errors.Wrapf(err, "error getting namespace %s", namespace.Name)

// Marked for deletion, keep waiting
terminatingNamespace = true
return false, nil
}

// In the case the namespace already exists and isn't marked for deletion, assume it's ready for use.
if ready {
return true, nsCreated, nil
// clusterNS found, is not nil, and not marked for deletion, therefore we shouldn't create it.
ready = true
return true, nil
})

// err will be set if we timed out or encountered issues retrieving the namespace,
if err != nil {
if terminatingNamespace {
// If the namespace is marked for deletion, and we timed out, adding it in tracker
namespaceDeletionStatusTracker.Add(namespace.Name, namespace.Name)
return false, nsCreated, errors.Wrapf(err, "timed out waiting for terminating namespace %s to disappear before restoring", namespace.Name)

} else if namespaceAlreadyInDeletionTracker {

[GitHub Actions / Run Linter Check failure on line 123 in pkg/util/kube/utils.go: unnecessary trailing newline (whitespace)]
// If the namespace is already in the tracker, return an error.
return false, nsCreated, err
}
return false, nsCreated, errors.Wrapf(err, "error getting namespace %s", namespace.Name)
}

clusterNS, err := client.Create(context.TODO(), namespace, metav1.CreateOptions{})
if apierrors.IsAlreadyExists(err) {
if clusterNS != nil && (clusterNS.GetDeletionTimestamp() != nil || clusterNS.Status.Phase == corev1api.NamespaceTerminating) {
// Somehow created after all our polling and marked for deletion, return an error
return false, nsCreated, errors.Errorf("namespace %s created and marked for termination after timeout", namespace.Name)
}
} else if err != nil {
return false, nsCreated, errors.Wrapf(err, "error creating namespace %s", namespace.Name)
} else {
nsCreated = true
// In the case the namespace already exists and isn't marked for deletion, assume it's ready for use.
if ready {
return true, nsCreated, nil
}

clusterNS, err := client.Create(context.TODO(), namespace, metav1.CreateOptions{})
if apierrors.IsAlreadyExists(err) {
if clusterNS != nil && (clusterNS.GetDeletionTimestamp() != nil || clusterNS.Status.Phase == corev1api.NamespaceTerminating) {
// Somehow created after all our polling and marked for deletion, return an error
return false, nsCreated, errors.Errorf("namespace %s created and marked for termination after timeout", namespace.Name)
}
} else if err != nil {
return false, nsCreated, errors.Wrapf(err, "error creating namespace %s", namespace.Name)
} else {
nsCreated = true
}

// The namespace created successfully
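The net effect of the utils.go change above: EnsureNamespaceExistsAndIsReady now consults the tracker inside the poll loop and only records a namespace after a timeout. If a namespace is already tracked as stuck terminating, polling stops immediately with an error; if a terminating namespace still has not disappeared when the timeout expires, it is added to the tracker so later restores into it fail fast. Below is a minimal runnable sketch of that decision flow; the single-argument tracker and the isTerminating probe are simplified stand-ins, not the real kube client or NamespaceDeletionStatusTracker API.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// Simplified stand-in for the namespace deletion status tracker.
type tracker map[string]bool

// ensureNamespace mimics the shape of the new logic: fail fast when the
// namespace is already tracked as stuck terminating, otherwise poll, and on
// timeout of a terminating namespace record it in the tracker.
func ensureNamespace(ns string, isTerminating func(string) bool, t tracker, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		if isTerminating(ns) {
			if t[ns] {
				// Already known to be stuck terminating: skip further polling.
				return fmt.Errorf("namespace %s is already present in the polling set, skipping execution", ns)
			}
			if time.Now().After(deadline) {
				// Timed out waiting for termination: remember it for next time.
				t[ns] = true
				return errors.New("timed out waiting for terminating namespace " + ns)
			}
			time.Sleep(10 * time.Millisecond)
			continue
		}
		return nil // namespace is gone or ready; the caller may create or reuse it
	}
}

func main() {
	trk := tracker{}
	stuck := func(string) bool { return true } // pretend the namespace never finishes terminating
	fmt.Println(ensureNamespace("demo", stuck, trk, 50*time.Millisecond)) // times out, records "demo"
	fmt.Println(ensureNamespace("demo", stuck, trk, 50*time.Millisecond)) // fails fast: already tracked
}
```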
13 changes: 13 additions & 0 deletions pkg/util/kube/utils_test.go
@@ -55,6 +55,7 @@ func TestEnsureNamespaceExistsAndIsReady(t *testing.T) {
alreadyExists bool
expectedResult bool
expectedCreatedResult bool
nsAlreadyInTerminationTracker bool
namespaceDeletionStatusTracker NamespaceDeletionStatusTracker
}{
{
@@ -96,6 +97,14 @@
expectedResult: false,
expectedCreatedResult: false,
},
{
name: "same namespace found earlier, terminating phase already tracked",
expectNSFound: true,
nsPhase: corev1.NamespaceTerminating,
expectedResult: false,
expectedCreatedResult: false,
nsAlreadyInTerminationTracker: true,
},
}

namespaceDeletionStatusTracker := NewNamespaceDeletionStatusTracker()
@@ -134,6 +143,10 @@ func TestEnsureNamespaceExistsAndIsReady(t *testing.T) {
nsClient.On("Create", namespace).Return(namespace, nil)
}

if test.nsAlreadyInTerminationTracker {
namespaceDeletionStatusTracker.Add("test", "test")
}

result, nsCreated, _ := EnsureNamespaceExistsAndIsReady(namespace, nsClient, timeout, namespaceDeletionStatusTracker)

assert.Equal(t, test.expectedResult, result)
