diff --git a/pkg/configauditreport/builder.go b/pkg/configauditreport/builder.go index 6c6dde732..99451adeb 100644 --- a/pkg/configauditreport/builder.go +++ b/pkg/configauditreport/builder.go @@ -30,6 +30,7 @@ type ReportBuilder struct { resourceLabelsToInclude []string additionalReportLabels labels.Set etc.Config + scopeResolver *kube.K8sScope } func NewReportBuilder(scheme *runtime.Scheme) *ReportBuilder { @@ -38,6 +39,11 @@ func NewReportBuilder(scheme *runtime.Scheme) *ReportBuilder { } } +func (b *ReportBuilder) ScopeResolver(sr *kube.K8sScope) *ReportBuilder { + b.scopeResolver = sr + return b +} + func (b *ReportBuilder) Controller(controller client.Object) *ReportBuilder { b.controller = controller return b @@ -170,7 +176,7 @@ func (b *ReportBuilder) GetReport() (v1alpha1.ConfigAuditReport, error) { } func (b *ReportBuilder) Write(ctx context.Context, writer Writer) error { - if kube.IsClusterScopedKind(b.controller.GetObjectKind().GroupVersionKind().Kind) { + if b.scopeResolver != nil && b.scopeResolver.IsClusterScope(b.controller.GetObjectKind().GroupVersionKind().Kind) { report, err := b.GetClusterReport() if err != nil { return err diff --git a/pkg/configauditreport/controller/nodecollector.go b/pkg/configauditreport/controller/nodecollector.go index 9581e312b..0cf12cd9b 100644 --- a/pkg/configauditreport/controller/nodecollector.go +++ b/pkg/configauditreport/controller/nodecollector.go @@ -160,6 +160,7 @@ func (r *NodeCollectorJobController) processCompleteScanJob(ctx context.Context, return fmt.Errorf("failed to evaluate policies on Node : %w", err) } infraReportBuilder := infraassessment.NewReportBuilder(r.Client.Scheme()). + ScopeResolver(&r.ObjectResolver.K8sScope). Controller(node). ResourceSpecHash(resourceHash). PluginConfigHash(policiesHash). 
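A minimal sketch of how a reconciler is expected to wire the new optional scope resolver into a report builder (the builder and kube identifiers come from this patch; the reconciler fields, the report writer, and the hash variables are assumed for illustration only):

    // Hypothetical call site: r embeds an ObjectResolver, which now carries a kube.K8sScope.
    builder := configauditreport.NewReportBuilder(r.Client.Scheme()).
        ScopeResolver(&r.ObjectResolver.K8sScope). // optional; when nil, Write falls back to a namespaced report
        Controller(resource).
        ResourceSpecHash(resourceHash).
        PluginConfigHash(policiesHash)
    if err := builder.Write(ctx, r.ReadWriter); err != nil { // r.ReadWriter: an assumed configauditreport.Writer
        return err
    }
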
diff --git a/pkg/configauditreport/controller/policyconfig.go b/pkg/configauditreport/controller/policyconfig.go index 37eb58903..9936a459b 100644 --- a/pkg/configauditreport/controller/policyconfig.go +++ b/pkg/configauditreport/controller/policyconfig.go @@ -6,9 +6,6 @@ import ( "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - rbacv1 "k8s.io/api/rbac/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" ctrl "sigs.k8s.io/controller-runtime" @@ -41,32 +38,14 @@ type PolicyConfigController struct { // Controller for trivy-operator-policies-config in the operator namespace; must be cluster scoped even with namespace predicate // +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch - func (r *PolicyConfigController) SetupWithManager(mgr ctrl.Manager) error { - - // Determine which Kubernetes workloads the controller will reconcile and add them to resources targetWorkloads := r.Config.GetTargetWorkloads() - workloadResources := make([]kube.Resource, 0) - for _, tw := range targetWorkloads { - var resource kube.Resource - if err := resource.GetWorkloadResource(tw, &v1alpha1.ConfigAuditReport{}, r.ObjectResolver); err != nil { - return err - } - workloadResources = append(workloadResources, resource) - } - // Add non workload related resources - resources := []kube.Resource{ - {Kind: kube.KindService, ForObject: &corev1.Service{}, OwnsObject: &v1alpha1.ConfigAuditReport{}}, - {Kind: kube.KindConfigMap, ForObject: &corev1.ConfigMap{}, OwnsObject: &v1alpha1.ConfigAuditReport{}}, - {Kind: kube.KindRole, ForObject: &rbacv1.Role{}, OwnsObject: &v1alpha1.RbacAssessmentReport{}}, - {Kind: kube.KindRoleBinding, ForObject: &rbacv1.RoleBinding{}, OwnsObject: &v1alpha1.RbacAssessmentReport{}}, - {Kind: kube.KindNetworkPolicy, ForObject: &networkingv1.NetworkPolicy{}, OwnsObject: &v1alpha1.ConfigAuditReport{}}, - {Kind: kube.KindResourceQuota, ForObject: &corev1.ResourceQuota{}, OwnsObject: &v1alpha1.ConfigAuditReport{}}, - {Kind: kube.KindLimitRange, ForObject: &corev1.LimitRange{}, OwnsObject: &v1alpha1.ConfigAuditReport{}}, + resources, clusterResources, err := kube.GetActiveResource(targetWorkloads, r.ObjectResolver, r.Scheme()) + if err != nil { + return fmt.Errorf("unable to setup resources for PolicyConfigController: %w", err) } - resources = append(resources, workloadResources...) for _, configResource := range resources { if err := ctrl.NewControllerManagedBy(mgr). 
For(&corev1.ConfigMap{}, builder.WithPredicates( @@ -77,13 +56,6 @@ func (r *PolicyConfigController) SetupWithManager(mgr ctrl.Manager) error { Complete(r.reconcileConfig(configResource.Kind)); err != nil { return fmt.Errorf("constructing controller for %s: %w", configResource.Kind, err) } - - } - - clusterResources := []kube.Resource{ - {Kind: kube.KindClusterRole, ForObject: &rbacv1.ClusterRole{}, OwnsObject: &v1alpha1.ClusterRbacAssessmentReport{}}, - {Kind: kube.KindClusterRoleBindings, ForObject: &rbacv1.ClusterRoleBinding{}, OwnsObject: &v1alpha1.ClusterRbacAssessmentReport{}}, - {Kind: kube.KindCustomResourceDefinition, ForObject: &apiextensionsv1.CustomResourceDefinition{}, OwnsObject: &v1alpha1.ClusterConfigAuditReport{}}, } for _, resource := range clusterResources { @@ -97,9 +69,7 @@ func (r *PolicyConfigController) SetupWithManager(mgr ctrl.Manager) error { return err } } - return nil - } func (r *PolicyConfigController) reconcileConfig(kind kube.Kind) reconcile.Func { diff --git a/pkg/configauditreport/controller/resource.go b/pkg/configauditreport/controller/resource.go index 5b384c7d6..ca31a1b73 100644 --- a/pkg/configauditreport/controller/resource.go +++ b/pkg/configauditreport/controller/resource.go @@ -10,10 +10,6 @@ import ( "time" "github.com/go-logr/logr" - corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - rbacv1 "k8s.io/api/rbac/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/errors" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -88,13 +84,13 @@ func (r *ResourceController) SetupWithManager(mgr ctrl.Manager) error { // Determine which Kubernetes workloads the controller will reconcile and add them to resources targetWorkloads := r.Config.GetTargetWorkloads() - targetWorkloads = append(targetWorkloads, strings.ToLower(string(kube.KindIngress))) - for _, tw := range targetWorkloads { - var resource kube.Resource - err = resource.GetWorkloadResource(tw, &v1alpha1.ConfigAuditReport{}, r.ObjectResolver) - if err != nil { - return err - } + + resources, clusterResources, err := kube.GetActiveResource(targetWorkloads, r.ObjectResolver, r.Scheme()) + if err != nil { + return fmt.Errorf("unable to setup resources for ResourceController: %w", err) + } + + for _, resource := range resources { resourceBuilder := r.buildControlMgr(mgr, resource, installModePredicate) if r.Config.InfraAssessmentScannerEnabled { resourceBuilder.Owns(&v1alpha1.InfraAssessmentReport{}) @@ -104,30 +100,6 @@ func (r *ResourceController) SetupWithManager(mgr ctrl.Manager) error { } } - // Add non workload related resources - resources := []kube.Resource{ - {Kind: kube.KindService, ForObject: &corev1.Service{}, OwnsObject: &v1alpha1.ConfigAuditReport{}}, - {Kind: kube.KindConfigMap, ForObject: &corev1.ConfigMap{}, OwnsObject: &v1alpha1.ConfigAuditReport{}}, - {Kind: kube.KindRole, ForObject: &rbacv1.Role{}, OwnsObject: &v1alpha1.RbacAssessmentReport{}}, - {Kind: kube.KindRoleBinding, ForObject: &rbacv1.RoleBinding{}, OwnsObject: &v1alpha1.RbacAssessmentReport{}}, - {Kind: kube.KindNetworkPolicy, ForObject: &networkingv1.NetworkPolicy{}, OwnsObject: &v1alpha1.ConfigAuditReport{}}, - {Kind: kube.KindResourceQuota, ForObject: &corev1.ResourceQuota{}, OwnsObject: &v1alpha1.ConfigAuditReport{}}, - {Kind: kube.KindLimitRange, ForObject: &corev1.LimitRange{}, OwnsObject: &v1alpha1.ConfigAuditReport{}}, - } - - for _, configResource := range resources { - if err := r.buildControlMgr(mgr, 
configResource, installModePredicate). - Complete(r.reconcileResource(configResource.Kind)); err != nil { - return fmt.Errorf("constructing controller for %s: %w", configResource.Kind, err) - } - } - - clusterResources := []kube.Resource{ - {Kind: kube.KindClusterRole, ForObject: &rbacv1.ClusterRole{}, OwnsObject: &v1alpha1.ClusterRbacAssessmentReport{}}, - {Kind: kube.KindClusterRoleBindings, ForObject: &rbacv1.ClusterRoleBinding{}, OwnsObject: &v1alpha1.ClusterRbacAssessmentReport{}}, - {Kind: kube.KindCustomResourceDefinition, ForObject: &apiextensionsv1.CustomResourceDefinition{}, OwnsObject: &v1alpha1.ClusterConfigAuditReport{}}, - } - for _, resource := range clusterResources { if err = ctrl.NewControllerManagedBy(mgr).WithOptions(controller.Options{ CacheSyncTimeout: r.CacheSyncTimeout, @@ -246,6 +218,7 @@ func (r *ResourceController) reconcileResource(resourceKind kube.Kind) reconcile // create config-audit report if !kube.IsRoleTypes(kube.Kind(kind)) || r.MergeRbacFindingWithConfigAudit { reportBuilder := configauditreport.NewReportBuilder(r.Client.Scheme()). + ScopeResolver(&r.ObjectResolver.K8sScope). Controller(resource). ResourceSpecHash(resourceHash). PluginConfigHash(policiesHash). @@ -261,6 +234,7 @@ func (r *ResourceController) reconcileResource(resourceKind kube.Kind) reconcile // create infra-assessment report if k8sCoreComponent(resource) && r.Config.InfraAssessmentScannerEnabled { infraReportBuilder := infraassessment.NewReportBuilder(r.Client.Scheme()). + ScopeResolver(&r.ObjectResolver.K8sScope). Controller(resource). ResourceSpecHash(resourceHash). PluginConfigHash(policiesHash). @@ -278,6 +252,7 @@ func (r *ResourceController) reconcileResource(resourceKind kube.Kind) reconcile // create rbac-assessment report if kube.IsRoleTypes(kube.Kind(kind)) && r.Config.RbacAssessmentScannerEnabled && !r.MergeRbacFindingWithConfigAudit { rbacReportBuilder := rbacassessment.NewReportBuilder(r.Client.Scheme()). + ScopeResolver(&r.ObjectResolver.K8sScope). Controller(resource). ResourceSpecHash(resourceHash). PluginConfigHash(policiesHash). 
@@ -366,7 +341,7 @@ func (r *ResourceController) hasReport(ctx context.Context, owner kube.ObjectRef if kube.IsRoleTypes(owner.Kind) { io = r.RbacReadWriter } - if kube.IsClusterScopedKind(string(owner.Kind)) { + if r.ObjectResolver.IsClusterScope(string(owner.Kind)) { hasClusterReport, err := r.hasClusterReport(ctx, owner, podSpecHash, pluginConfigHash, io) if err != nil { return false, err diff --git a/pkg/infraassessment/builder.go b/pkg/infraassessment/builder.go index 8ea936b9a..bd07933c2 100644 --- a/pkg/infraassessment/builder.go +++ b/pkg/infraassessment/builder.go @@ -35,6 +35,7 @@ type ReportBuilder struct { reportTTL *time.Duration resourceLabelsToInclude []string additionalReportLabels labels.Set + scopeResolver *kube.K8sScope } func NewReportBuilder(scheme *runtime.Scheme) *ReportBuilder { @@ -43,6 +44,11 @@ func NewReportBuilder(scheme *runtime.Scheme) *ReportBuilder { } } +func (b *ReportBuilder) ScopeResolver(sr *kube.K8sScope) *ReportBuilder { + b.scopeResolver = sr + return b +} + func (b *ReportBuilder) Controller(controller client.Object) *ReportBuilder { b.controller = controller return b @@ -175,7 +181,7 @@ func (b *ReportBuilder) GetClusterReport() (v1alpha1.ClusterInfraAssessmentRepor } func (b *ReportBuilder) Write(ctx context.Context, writer Writer) error { - if kube.IsClusterScopedKind(b.controller.GetObjectKind().GroupVersionKind().Kind) { + if b.scopeResolver != nil && b.scopeResolver.IsClusterScope(b.controller.GetObjectKind().GroupVersionKind().Kind) { report, err := b.GetClusterReport() if err != nil { return err diff --git a/pkg/kube/object.go b/pkg/kube/object.go index a03da52e0..c5cf20099 100644 --- a/pkg/kube/object.go +++ b/pkg/kube/object.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "slices" "strconv" "strings" @@ -17,6 +18,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/cli-runtime/pkg/genericclioptions" @@ -25,6 +27,7 @@ import ( "github.com/aquasecurity/trivy-operator/pkg/apis/aquasecurity/v1alpha1" "github.com/aquasecurity/trivy-operator/pkg/trivyoperator" + "github.com/aquasecurity/trivy/pkg/set" ) // ObjectRef is a simplified representation of a Kubernetes client.Object. @@ -91,26 +94,69 @@ func IsBuiltInWorkload(controller *metav1.OwnerReference) bool { // IsWorkload returns true if the specified resource kinds represents Kubernetes // workload, false otherwise. func IsWorkload(kind string) bool { - return kind == "Pod" || - kind == "Deployment" || - kind == "ReplicaSet" || - kind == "ReplicationController" || - kind == "StatefulSet" || - kind == "DaemonSet" || - kind == "Job" || - kind == "CronJob" -} - -// IsClusterScopedKind returns true if the specified kind is ClusterRole, -// ClusterRoleBinding, and CustomResourceDefinition. -// TODO Use discovery client to have a generic implementation. -func IsClusterScopedKind(kind string) bool { - switch kind { - case string(KindClusterRole), string(KindClusterRoleBindings), string(KindCustomResourceDefinition), string(KindNode): - return true - default: - return false + workloads := []string{"pod", "deployment", "replicaset", "replicationcontroller", "statefulset", "daemonset", "job", "cronjob"} + return slices.Contains(workloads, strings.ToLower(kind)) +} + +// GetActiveResource returns a list of active resources based on the target workloads. 
+// it includes namespaced and cluster-scope resources. +func GetActiveResource(targetWorkloads []string, objectResolver ObjectResolver, schm *runtime.Scheme) ([]Resource, []Resource, error) { + customTargets := set.New[string]() + workloadResources := make([]Resource, 0) + for _, tw := range targetWorkloads { + if !IsWorkload(tw) { + customTargets.Append(strings.ToLower(tw)) + continue + } + var resource Resource + if err := resource.GetWorkloadResource(tw, &v1alpha1.ConfigAuditReport{}, objectResolver); err != nil { + return nil, nil, fmt.Errorf("failed to get workload resource for %s: %w", tw, err) + } + workloadResources = append(workloadResources, resource) + } + resources := DefaultNonWorkloadResources() + resources = append(resources, workloadResources...) + + clusterResources := []Resource{ + {Kind: KindClusterRole, ForObject: &rbacv1.ClusterRole{}, OwnsObject: &v1alpha1.ClusterRbacAssessmentReport{}}, + {Kind: KindClusterRoleBindings, ForObject: &rbacv1.ClusterRoleBinding{}, OwnsObject: &v1alpha1.ClusterRbacAssessmentReport{}}, + {Kind: KindCustomResourceDefinition, ForObject: &apiextensionsv1.CustomResourceDefinition{}, OwnsObject: &v1alpha1.ClusterConfigAuditReport{}}, } + + if customTargets.Size() > 0 { + allKinds := schm.AllKnownTypes() + + for gvk := range allKinds { + kind := strings.ToLower(gvk.Kind) + if !customTargets.Contains(kind) { + continue + } + obj, err := schm.New(gvk) + if err != nil { + return nil, nil, fmt.Errorf("cannot create object for GVK %v: %w", gvk, err) + } + typedObj, ok := obj.(client.Object) + if !ok { + return nil, nil, fmt.Errorf("object does not implement client.Object: %T", obj) + } + if objectResolver.IsClusterScope(kind) { + // If the resource is not namespaced, we add it to clusterResources + clusterResources = append(clusterResources, Resource{ + Kind: Kind(gvk.Kind), + ForObject: typedObj, + OwnsObject: &v1alpha1.ClusterConfigAuditReport{}, + }) + continue + } + // If the resource is namespaced, we add it to resources + resources = append(resources, Resource{ + Kind: Kind(gvk.Kind), + ForObject: typedObj, + OwnsObject: &v1alpha1.ConfigAuditReport{}, + }) + } + } + return resources, clusterResources, nil } // ObjectRefToLabels encodes the specified ObjectRef as a set of labels. @@ -202,11 +248,11 @@ func ObjectRefFromKindAndObjectKey(kind Kind, name client.ObjectKey) ObjectRef { // adding it as the trivy-operator.LabelResourceSpecHash label to an instance of a // security report. 
func ComputeSpecHash(obj client.Object) (string, error) { - switch t := obj.(type) { + switch obj.(type) { case *corev1.Pod, *appsv1.Deployment, *appsv1.ReplicaSet, *corev1.ReplicationController, *appsv1.StatefulSet, *appsv1.DaemonSet, *batchv1.CronJob, *batchv1beta1.CronJob, *batchv1.Job: spec, err := GetPodSpec(obj) if err != nil { - return "", err + return "", fmt.Errorf("error getting pod spec for %T: %w", obj, err) } return ComputeHash(spec), nil case *corev1.Service: @@ -234,7 +280,7 @@ func ComputeSpecHash(obj client.Object) (string, error) { case *apiextensionsv1.CustomResourceDefinition: return ComputeHash(obj), nil default: - return "", fmt.Errorf("computing spec hash of unsupported object: %T", t) + return ComputeHash(obj), nil } } @@ -282,10 +328,11 @@ type CompatibleObjectMapper struct { type ObjectResolver struct { client.Client CompatibleMgr + K8sScope } func NewObjectResolver(c client.Client, cm CompatibleMgr) ObjectResolver { - return ObjectResolver{c, cm} + return ObjectResolver{c, cm, NewK8sScopeResolver(c)} } // InitCompatibleMgr initializes a CompatibleObjectMapper who store a map the of supported kinds with it compatible Objects (group/api/kind) @@ -379,7 +426,18 @@ func (o *ObjectResolver) ObjectFromObjectRef(ctx context.Context, ref ObjectRef) case KindClusterSbomReport: obj = &v1alpha1.ClusterSbomReport{} default: - return nil, fmt.Errorf("unknown kind: %s", ref.Kind) + gvk, ok := o.K8sScope.GVKbyKind(string(ref.Kind)) + if !ok { + return nil, fmt.Errorf("can't get GVK by kind %s", ref.Kind) + } + rawobj, err := o.Scheme().New(gvk) + if err != nil { + return nil, fmt.Errorf("cannot create object for GVK %v: %w", gvk, err) + } + obj, ok = rawobj.(client.Object) + if !ok { + return nil, fmt.Errorf("object does not implement client.Object: %T", rawobj) + } } err := o.Client.Get(ctx, client.ObjectKey{ Name: ref.Name, @@ -711,6 +769,19 @@ type Resource struct { OwnsObject client.Object } +func DefaultNonWorkloadResources() []Resource { + // Add non workload related resources + return []Resource{ + {Kind: KindService, ForObject: &corev1.Service{}, OwnsObject: &v1alpha1.ConfigAuditReport{}}, + {Kind: KindConfigMap, ForObject: &corev1.ConfigMap{}, OwnsObject: &v1alpha1.ConfigAuditReport{}}, + {Kind: KindRole, ForObject: &rbacv1.Role{}, OwnsObject: &v1alpha1.RbacAssessmentReport{}}, + {Kind: KindRoleBinding, ForObject: &rbacv1.RoleBinding{}, OwnsObject: &v1alpha1.RbacAssessmentReport{}}, + {Kind: KindNetworkPolicy, ForObject: &networkingv1.NetworkPolicy{}, OwnsObject: &v1alpha1.ConfigAuditReport{}}, + {Kind: KindResourceQuota, ForObject: &corev1.ResourceQuota{}, OwnsObject: &v1alpha1.ConfigAuditReport{}}, + {Kind: KindLimitRange, ForObject: &corev1.LimitRange{}, OwnsObject: &v1alpha1.ConfigAuditReport{}}, + } +} + // GetWorkloadResource returns a Resource object which can be used by controllers for reconciliation func (r *Resource) GetWorkloadResource(kind string, object client.Object, resolver ObjectResolver) error { @@ -740,13 +811,6 @@ func (r *Resource) GetWorkloadResource(kind string, object client.Object, resolv return nil } -func IsValidK8sKind(kind string) bool { - if IsWorkload(kind) || IsClusterScopedKind(kind) || IsRoleRelatedNamespaceScope(Kind(kind)) || isValidNamespaceResource(Kind(kind)) || kind == "Workload" { - return true - } - return false -} - func IsRoleRelatedNamespaceScope(kind Kind) bool { if kind == KindRole || kind == KindRoleBinding { return true @@ -760,10 +824,3 @@ func IsRoleTypes(kind Kind) bool { } return false } - -func 
isValidNamespaceResource(kind Kind) bool { - if kind == KindConfigMap || kind == KindNetworkPolicy || kind == KindIngress || kind == KindResourceQuota || kind == KindLimitRange || kind == KindService { - return true - } - return false -} diff --git a/pkg/kube/object_test.go b/pkg/kube/object_test.go index 380c09733..53fc2b7e7 100644 --- a/pkg/kube/object_test.go +++ b/pkg/kube/object_test.go @@ -112,6 +112,8 @@ func TestIsWorkload(t *testing.T) { } func TestIsClusterScopedKind(t *testing.T) { + sr := kube.NewK8sScopeResolver(fake.NewClientBuilder().WithScheme(trivyoperator.NewScheme()).Build()) + testCases := []struct { kind string want bool }{ @@ -143,7 +145,7 @@ } for _, tt := range testCases { t.Run(fmt.Sprintf("Should return %t when controller kind is %s", tt.want, tt.kind), func(t *testing.T) { - assert.Equal(t, tt.want, kube.IsClusterScopedKind(tt.kind)) + assert.Equal(t, tt.want, sr.IsClusterScope(tt.kind)) }) } } @@ -1568,59 +1570,6 @@ func TestIsRoleTypes(t *testing.T) { } } -func TestIsValidK8sKinds(t *testing.T) { - testCases := []struct { - kind string - want bool - }{ - { - kind: "Pod", - want: true, - }, - { - kind: "ClusterRole", - want: true, - }, - { - kind: "Deployment", - want: true, - }, - { - kind: "ReplicationController", - want: true, - }, - { - kind: "StatefulSet", - want: true, - }, - { - kind: "Job", - want: true, - }, - { - kind: "Deployment", - want: true, - }, - { - kind: "CronJob", - want: true, - }, - { - kind: "CustomResourceDefinition", - want: true, - }, - { - kind: "Test", - want: false, - }, - } - for _, tt := range testCases { - t.Run(fmt.Sprintf("Should return %t when kind is %s", tt.want, tt.kind), func(t *testing.T) { - assert.Equal(t, tt.want, kube.IsValidK8sKind(tt.kind)) - }) - } -} - func TestIsRoleRelatedNamespaceScope(t *testing.T) { testCases := []struct { kind string diff --git a/pkg/kube/scope.go b/pkg/kube/scope.go new file mode 100644 index 000000000..6a2735f0c --- /dev/null +++ b/pkg/kube/scope.go @@ -0,0 +1,56 @@ +package kube + +import ( + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type K8sScope struct { + clusterScopeKinds map[string]bool + gvkByKind map[string]schema.GroupVersionKind +} + +func NewK8sScopeResolver(c client.Client) K8sScope { + sr := K8sScope{ + clusterScopeKinds: make(map[string]bool), + gvkByKind: make(map[string]schema.GroupVersionKind), + } + + // add pre-defined cluster-scoped kinds (also a fallback when the RESTMapper cannot resolve them) + sr.clusterScopeKinds["clusterrole"] = true + sr.clusterScopeKinds["clusterrolebinding"] = true + sr.clusterScopeKinds["customresourcedefinition"] = true + sr.clusterScopeKinds["node"] = true + + scm := c.Scheme() + if scm == nil { + return sr + } + mapper := c.RESTMapper() + allKinds := scm.AllKnownTypes() + for gvk := range allKinds { + mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + continue + } + kind := strings.ToLower(gvk.Kind) + sr.gvkByKind[kind] = gvk + if mapping.Scope.Name() == meta.RESTScopeNameRoot { + sr.clusterScopeKinds[kind] = true + } + } + + return sr +} + +func (sr K8sScope) IsClusterScope(kind string) bool { + return sr.clusterScopeKinds[strings.ToLower(kind)] +} + +func (sr K8sScope) GVKbyKind(kind string) (schema.GroupVersionKind, bool) { + gvk, ok := sr.gvkByKind[strings.ToLower(kind)] + return gvk, ok +} diff --git a/pkg/kube/scope_test.go b/pkg/kube/scope_test.go new file mode 100644 index 000000000..49908b9e3 --- /dev/null +++ b/pkg/kube/scope_test.go @@ -0,0 +1,117 @@ 
+package kube + +import ( + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestNewK8sScopeResolver(t *testing.T) { + sch := runtime.NewScheme() + + if err := corev1.AddToScheme(sch); err != nil { + t.Fatalf("add core to scheme: %v", err) + } + if err := rbacv1.AddToScheme(sch); err != nil { + t.Fatalf("add rbac to scheme: %v", err) + } + if err := apiextv1.AddToScheme(sch); err != nil { + t.Fatalf("add apiext to scheme: %v", err) + } + + rm := meta.NewDefaultRESTMapper([]schema.GroupVersion{ + corev1.SchemeGroupVersion, + rbacv1.SchemeGroupVersion, + apiextv1.SchemeGroupVersion, + }) + rm.Add(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}, meta.RESTScopeNamespace) + rm.Add(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}, meta.RESTScopeNamespace) + rm.Add(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"}, meta.RESTScopeRoot) + rm.Add(schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRole"}, meta.RESTScopeRoot) + rm.Add(schema.GroupVersionKind{Group: "apiextensions.k8s.io", Version: "v1", Kind: "CustomResourceDefinition"}, meta.RESTScopeRoot) + + c := fake.NewClientBuilder().WithScheme(sch).WithRESTMapper(rm).Build() + sr := NewK8sScopeResolver(c) + + tests := []struct { + name string + kind string + wantCluster bool + wantGVK schema.GroupVersionKind + wantGVKFound bool + }{ + { + name: "predefined only (ClusterRoleBinding)", + kind: "ClUsTeRrOlEbInDiNg", + wantCluster: true, + wantGVKFound: false, // not registered in mapper + }, + { + name: "mapped cluster-scoped CRD", + kind: "CustomResourceDefinition", + wantCluster: true, + wantGVK: schema.GroupVersionKind{Group: "apiextensions.k8s.io", Version: "v1", Kind: "CustomResourceDefinition"}, + wantGVKFound: true, + }, + { + name: "mapped namespaced Pod", + kind: "pOd", + wantCluster: false, + wantGVK: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}, + wantGVKFound: true, + }, + { + name: "mapped namespaced ConfigMap", + kind: "CoNfIgMaP", + wantCluster: false, + wantGVK: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}, + wantGVKFound: true, + }, + { + name: "mapped cluster-scoped Node", + kind: "Node", + wantCluster: true, + wantGVK: schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"}, + wantGVKFound: true, + }, + { + name: "mapped cluster-scoped ClusterRole", + kind: "ClUsTeRrOlE", + wantCluster: true, + wantGVK: schema.GroupVersionKind{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRole"}, + wantGVKFound: true, + }, + { + name: "unknown kind", + kind: "SomeUnknown", + wantCluster: false, + wantGVKFound: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := sr.IsClusterScope(tt.kind); got != tt.wantCluster { + t.Fatalf("IsClusterScope(%q)=%v want %v", tt.kind, got, tt.wantCluster) + } + gotGVK, ok := sr.GVKbyKind(tt.kind) + if ok != tt.wantGVKFound { + t.Fatalf("GVKbyKind(%q) found=%v want %v (gvk=%#v)", tt.kind, ok, tt.wantGVKFound, gotGVK) + } + if ok && gotGVK != tt.wantGVK { + t.Fatalf("GVKbyKind(%q)=%#v want %#v", tt.kind, gotGVK, tt.wantGVK) + } + // Sanity: keys are case-insensitive + if sr.IsClusterScope(strings.ToLower(tt.kind)) != 
sr.IsClusterScope(strings.ToUpper(tt.kind)) { + t.Fatalf("expected case-insensitive cluster scope lookup for %q", tt.kind) + } + }) + } +} diff --git a/pkg/operator/etc/config.go b/pkg/operator/etc/config.go index 91bb7aa0b..8d13ee691 100644 --- a/pkg/operator/etc/config.go +++ b/pkg/operator/etc/config.go @@ -54,7 +54,7 @@ type Config struct { WebhookBroadcastTimeout *time.Duration `env:"OPERATOR_WEBHOOK_BROADCAST_TIMEOUT" envDefault:"30s"` WebhookBroadcastCustomHeaders string `env:"OPERATOR_WEBHOOK_BROADCAST_CUSTOM_HEADERS"` WebhookSendDeletedReports bool `env:"OPERATOR_SEND_DELETED_REPORTS" envDefault:"false"` - TargetWorkloads string `env:"OPERATOR_TARGET_WORKLOADS" envDefault:"Pod,ReplicaSet,ReplicationController,StatefulSet,DaemonSet,CronJob,Job"` + TargetWorkloads string `env:"OPERATOR_TARGET_WORKLOADS" envDefault:"Pod,ReplicaSet,ReplicationController,StatefulSet,DaemonSet,CronJob,Job,PersistentVolume"` AccessGlobalSecretsAndServiceAccount bool `env:"OPERATOR_ACCESS_GLOBAL_SECRETS_SERVICE_ACCOUNTS" envDefault:"true"` PrivateRegistryScanSecretsNames string `env:"OPERATOR_PRIVATE_REGISTRY_SCAN_SECRETS_NAMES"` BuiltInTrivyServer bool `env:"OPERATOR_BUILT_IN_TRIVY_SERVER" envDefault:"false"` @@ -131,7 +131,7 @@ func (c Config) GetTargetWorkloads() []string { return strings.Split(strings.ToLower(workloads), ",") } - return []string{"pod", "replicaset", "replicationcontroller", "statefulset", "daemonset", "cronjob", "job"} + return []string{"pod", "replicaset", "replicationcontroller", "statefulset", "daemonset", "cronjob", "job", "persistentvolume"} } // InstallMode represents multitenancy support defined by the Operator Lifecycle Manager spec. diff --git a/pkg/operator/operator.go b/pkg/operator/operator.go index f63d72fb5..c6b1c8ee0 100644 --- a/pkg/operator/operator.go +++ b/pkg/operator/operator.go @@ -181,6 +181,7 @@ func Start(ctx context.Context, buildInfo trivyoperator.BuildInfo, operatorConfi if err != nil { return err } + objectResolver := kube.NewObjectResolver(mgr.GetClient(), compatibleObjectMapper) limitChecker := jobs.NewLimitChecker(operatorConfig, mgr.GetClient(), trivyOperatorConfig) logsReader := kube.NewLogsReader(clientSet) @@ -324,7 +325,7 @@ func Start(ctx context.Context, buildInfo trivyoperator.BuildInfo, operatorConfi return fmt.Errorf("unable to setup resource controller: %w", err) } if err = (&controller.PolicyConfigController{ - Logger: ctrl.Log.WithName("resourcecontroller"), + Logger: ctrl.Log.WithName("policyconfigcontroller"), Config: operatorConfig, PolicyLoader: policyLoader, ObjectResolver: objectResolver, diff --git a/pkg/rbacassessment/builder.go b/pkg/rbacassessment/builder.go index 7a21d2b31..1d1552a9e 100644 --- a/pkg/rbacassessment/builder.go +++ b/pkg/rbacassessment/builder.go @@ -35,6 +35,7 @@ type ReportBuilder struct { reportTTL *time.Duration resourceLabelsToInclude []string additionalReportLabels labels.Set + scopeResolver *kube.K8sScope } func NewReportBuilder(scheme *runtime.Scheme) *ReportBuilder { @@ -43,6 +44,11 @@ func NewReportBuilder(scheme *runtime.Scheme) *ReportBuilder { } } +func (b *ReportBuilder) ScopeResolver(sr *kube.K8sScope) *ReportBuilder { + b.scopeResolver = sr + return b +} + func (b *ReportBuilder) Controller(controller client.Object) *ReportBuilder { b.controller = controller return b @@ -175,7 +181,7 @@ func (b *ReportBuilder) GetReport() (v1alpha1.RbacAssessmentReport, error) { } func (b *ReportBuilder) Write(ctx context.Context, writer Writer) error { - if 
kube.IsClusterScopedKind(b.controller.GetObjectKind().GroupVersionKind().Kind) { + if b.scopeResolver != nil && b.scopeResolver.IsClusterScope(b.controller.GetObjectKind().GroupVersionKind().Kind) { report, err := b.GetClusterReport() if err != nil { return err diff --git a/pkg/utils/util.go b/pkg/utils/util.go index 423801881..0658bc546 100644 --- a/pkg/utils/util.go +++ b/pkg/utils/util.go @@ -11,9 +11,6 @@ func MapKinds(kinds []string) []string { set := hashset.New() updatedKinds := make([]string, 0) for _, kind := range kinds { - if !kube.IsValidK8sKind(kind) { - continue - } if kind == "Workload" { set.Add(string(kube.KindPod), string(kube.KindReplicationController), string(kube.KindReplicaSet), string(kube.KindStatefulSet), diff --git a/pkg/utils/util_test.go b/pkg/utils/util_test.go index b4849d8e6..03b8b9d32 100644 --- a/pkg/utils/util_test.go +++ b/pkg/utils/util_test.go @@ -15,7 +15,6 @@ func TestMapResources(t *testing.T) { {name: "with workload", kinds: []string{"Workload"}, want: 8}, {name: "dup kinds", kinds: []string{"Workload", "Pod", "Job"}, want: 8}, {name: "empty kinds", kinds: []string{}, want: 0}, - {name: "non valid kinds", kinds: []string{"Pod", "Koko"}, want: 1}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/vulnerabilityreport/controller/workload.go b/pkg/vulnerabilityreport/controller/workload.go index fd4dd7c84..fe7cd38a9 100644 --- a/pkg/vulnerabilityreport/controller/workload.go +++ b/pkg/vulnerabilityreport/controller/workload.go @@ -94,6 +94,10 @@ func (r *WorkloadController) SetupWithManager(mgr ctrl.Manager) error { // Determine which Kubernetes workloads the controller will reconcile and add them to resources targetWorkloads := r.Config.GetTargetWorkloads() for _, tw := range targetWorkloads { + // TargetWorkloads may contain custom workload kinds that are not supported by the vulnerability controller. 
+ if !kube.IsWorkload(tw) { + continue + } var resource kube.Resource err := resource.GetWorkloadResource(tw, nil, r.ObjectResolver) if err != nil { diff --git a/tests/itest/helper/helper.go b/tests/itest/helper/helper.go index eabeb06b9..c6d6357b3 100644 --- a/tests/itest/helper/helper.go +++ b/tests/itest/helper/helper.go @@ -379,6 +379,26 @@ func (h *Helper) HasConfigAuditReportOwnedBy(ctx context.Context, obj client.Obj } } +func (h *Helper) HasClusterConfigAuditReportOwnedBy(ctx context.Context, obj client.Object) func() (bool, error) { + return func() (bool, error) { + gvk, err := apiutil.GVKForObject(obj, h.scheme) + if err != nil { + return false, err + } + var reportsList v1alpha1.ClusterConfigAuditReportList + err = h.kubeClient.List(ctx, &reportsList, client.MatchingLabels{ + trivyoperator.LabelResourceKind: gvk.Kind, + trivyoperator.LabelResourceName: obj.GetName(), + trivyoperator.LabelResourceNamespace: obj.GetNamespace(), + }) + if err != nil { + return false, err + } + + return len(reportsList.Items) == 1 && reportsList.Items[0].DeletionTimestamp == nil, nil + } +} + func (h *Helper) HasScanJobPodOwnedBy(ctx context.Context, obj client.Object) func() (bool, error) { return func() (bool, error) { gvk, err := apiutil.GVKForObject(obj, h.scheme) diff --git a/tests/itest/trivy-operator/behavior/behavior.go b/tests/itest/trivy-operator/behavior/behavior.go index cfe6ef822..1a37eb438 100644 --- a/tests/itest/trivy-operator/behavior/behavior.go +++ b/tests/itest/trivy-operator/behavior/behavior.go @@ -3,16 +3,19 @@ package behavior import ( "context" "errors" + "strings" "time" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/rand" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/aquasecurity/trivy-operator/pkg/trivyoperator" "github.com/aquasecurity/trivy-operator/tests/itest/helper" . 
"github.com/onsi/ginkgo/v2" @@ -456,6 +459,183 @@ func ConfigurationCheckerBehavior(inputs *Inputs) func() { }) }) + + Context("When PersistentVolume is created", func() { + + var ctx context.Context + var pv *corev1.PersistentVolume + + BeforeEach(func() { + ctx = context.Background() + pv = &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv-" + rand.String(5), + }, + Spec: corev1.PersistentVolumeSpec{ + Capacity: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + PersistentVolumeReclaimPolicy: corev1.PersistentVolumeReclaimDelete, + PersistentVolumeSource: corev1.PersistentVolumeSource{ + HostPath: &corev1.HostPathVolumeSource{Path: "/tmp"}, + }, + }, + } + + err := inputs.Create(ctx, pv) + Expect(err).ToNot(HaveOccurred()) + }) + + It("Should create ClusterConfigAuditReport", func() { + Eventually(inputs.HasClusterConfigAuditReportOwnedBy(ctx, pv), inputs.AssertTimeout).Should(BeTrue()) + }) + + AfterEach(func() { + err := inputs.Delete(ctx, pv) + Expect(err).ToNot(HaveOccurred()) + }) + }) + + Context("When PersistentVolumeClaim is created", func() { + + var ctx context.Context + var pvc *corev1.PersistentVolumeClaim + + BeforeEach(func() { + ctx = context.Background() + qty := resource.MustParse("1Gi") + pvc = &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: inputs.PrimaryNamespace, + Name: "pvc-" + rand.String(5), + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: qty, + }, + }, + }, + } + err := inputs.Create(ctx, pvc) + Expect(err).ToNot(HaveOccurred()) + }) + + It("Should create ConfigAuditReport", func() { + Eventually(inputs.HasConfigAuditReportOwnedBy(ctx, pvc), inputs.AssertTimeout).Should(BeTrue()) + }) + + AfterEach(func() { + err := inputs.Delete(ctx, pvc) + Expect(err).ToNot(HaveOccurred()) + }) + }) + + Context("When PersistentVolumeClaim scanning is disabled", func() { + + var ( + ctx context.Context + pluginCM *corev1.ConfigMap + pvc *corev1.PersistentVolumeClaim + originalKinds string + cleanupPVC func() + restoreConfig func() + ) + + findPluginConfigMap := func(ctx context.Context) (*corev1.ConfigMap, error) { + var cms corev1.ConfigMapList + if err := inputs.Client.List(ctx, &cms, &client.ListOptions{}); err != nil { + return nil, err + } + name := trivyoperator.GetPluginConfigMapName("Trivy") + for i := range cms.Items { + cm := cms.Items[i] + if cm.Name == name { + cpy := cm.DeepCopy() + return cpy, nil + } + } + return nil, nil + } + + BeforeEach(func() { + ctx = context.Background() + + var err error + pluginCM, err = findPluginConfigMap(ctx) + Expect(err).ToNot(HaveOccurred()) + Expect(pluginCM).ToNot(BeNil(), "plugin configmap not found") + + if pluginCM.Data == nil { + pluginCM.Data = make(map[string]string) + } + originalKinds = pluginCM.Data["trivy.supportedConfigAuditKinds"] + + kinds := originalKinds + if kinds == "" { + kinds = "Workload,Service,Role,ClusterRole,NetworkPolicy,Ingress,LimitRange,ResourceQuota" + } + parts := []string{} + for _, k := range strings.Split(kinds, ",") { + k = strings.TrimSpace(k) + if k == "PersistentVolumeClaim" { + continue + } + parts = append(parts, k) + } + pluginCM.Data["trivy.supportedConfigAuditKinds"] = strings.Join(parts, ",") + err = inputs.Client.Update(ctx, pluginCM) + 
Expect(err).ToNot(HaveOccurred()) + + // allow operator to reload config + time.Sleep(2 * time.Second) + + qty := resource.MustParse("1Gi") + pvc = &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: inputs.PrimaryNamespace, + Name: "pvc-" + rand.String(5), + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: qty, + }, + }, + }, + } + err = inputs.Create(ctx, pvc) + Expect(err).ToNot(HaveOccurred()) + + cleanupPVC = func() { _ = inputs.Delete(ctx, pvc) } + restoreConfig = func() { + cm, err := findPluginConfigMap(ctx) + if err == nil && cm != nil { + if cm.Data == nil { + cm.Data = make(map[string]string) + } + cm.Data["trivy.supportedConfigAuditKinds"] = originalKinds + _ = inputs.Client.Update(ctx, cm) + } + } + }) + + AfterEach(func() { + if cleanupPVC != nil { + cleanupPVC() + } + if restoreConfig != nil { + restoreConfig() + } + }) + + It("Should not create ConfigAuditReport for PVC", func() { + Consistently(inputs.HasConfigAuditReportOwnedBy(ctx, pvc), time.Minute, inputs.PollingInterval).Should(BeFalse()) + }) + }) } } diff --git a/tests/itest/trivy-operator/suite_test.go b/tests/itest/trivy-operator/suite_test.go index ead048b72..641f92c46 100644 --- a/tests/itest/trivy-operator/suite_test.go +++ b/tests/itest/trivy-operator/suite_test.go @@ -2,10 +2,12 @@ package trivy_operator import ( "context" + "strings" "testing" "time" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -14,6 +16,7 @@ import ( "github.com/aquasecurity/trivy-operator/pkg/operator" "github.com/aquasecurity/trivy-operator/pkg/operator/etc" + "github.com/aquasecurity/trivy-operator/pkg/plugins/trivy" "github.com/aquasecurity/trivy-operator/pkg/trivyoperator" "github.com/aquasecurity/trivy-operator/tests/itest/helper" "github.com/aquasecurity/trivy-operator/tests/itest/trivy-operator/behavior" @@ -66,6 +69,30 @@ var _ = BeforeSuite(func() { }) Expect(err).ToNot(HaveOccurred()) + installMode, operatorNamespace, _, err := operatorConfig.ResolveInstallMode() + Expect(err).ToNot(HaveOccurred(), "install mode: %s", installMode) + + pluginCM := &corev1.ConfigMap{} + pluginCM.Namespace = operatorNamespace + pluginCM.Name = trivyoperator.GetPluginConfigMapName("Trivy") + _ = kubeClient.Delete(context.Background(), pluginCM) + pluginCM = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: operatorNamespace, + Name: trivyoperator.GetPluginConfigMapName("Trivy"), + }, + Data: map[string]string{ + "trivy.useBuiltinRegoPolicies": "true", + "trivy.supportedConfigAuditKinds": "Workload,Service,Role,ClusterRole,NetworkPolicy,Ingress,LimitRange,ResourceQuota,PersistentVolume,PersistentVolumeClaim", + "trivy.repository": trivy.DefaultImageRepository, + "trivy.tag": "0.65.0", + "trivy.dbRepository": trivy.DefaultDBRepository, + "trivy.javaDbRepository": trivy.DefaultJavaDBRepository, + }, + } + _ = kubeClient.Create(context.Background(), pluginCM) + _ = kubeClient.Update(context.Background(), pluginCM) + inputs = behavior.Inputs{ AssertTimeout: 5 * time.Minute, PollingInterval: 5 * time.Second, @@ -90,6 +117,12 @@ func ApplyTestConfiguration(operatorConfig *etc.Config) { // Default is 0. Set to 30 seconds for testing scan job TTL behavior. 
scanJobTTL := 30 * time.Second operatorConfig.ScanJobTTL = &scanJobTTL + + if operatorConfig.TargetWorkloads == "" { + operatorConfig.TargetWorkloads = "Pod,ReplicaSet,ReplicationController,StatefulSet,DaemonSet,CronJob,Job,PersistentVolume,PersistentVolumeClaim" + } else if !strings.Contains(operatorConfig.TargetWorkloads, "PersistentVolumeClaim") { + operatorConfig.TargetWorkloads += ",PersistentVolumeClaim" + } } var _ = AfterSuite(func() {
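
Taken end to end, a target kind that is not one of the built-in workloads is now classified by API scope instead of being rejected. A rough sketch of that flow, assuming the kind is registered in the manager's scheme and resolvable by its RESTMapper (the kube and config identifiers come from this patch; the surrounding setup mirrors operator.go and is illustrative only):

    // OPERATOR_TARGET_WORKLOADS="Pod,ReplicaSet,ReplicationController,StatefulSet,DaemonSet,CronJob,Job,PersistentVolume"
    targetWorkloads := operatorConfig.GetTargetWorkloads()                            // lower-cased kind names
    objectResolver := kube.NewObjectResolver(mgr.GetClient(), compatibleObjectMapper) // embeds a K8sScope built from the client's scheme and RESTMapper

    namespaced, clusterScoped, err := kube.GetActiveResource(targetWorkloads, objectResolver, mgr.GetScheme())
    if err != nil {
        return err
    }
    // "persistentvolume" fails IsWorkload, so it is looked up in the scheme; the RESTMapper reports it
    // as cluster-scoped, so it lands in clusterScoped and is reconciled into a ClusterConfigAuditReport,
    // while a namespaced custom kind such as PersistentVolumeClaim ends up in namespaced and produces
    // ConfigAuditReports.
    _ = namespaced
    _ = clusterScoped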