From f06936493e99e5f48b3d348d2c414ada523b2bd7 Mon Sep 17 00:00:00 2001 From: Soumya Ghosh Dastidar <44349253+gdsoumya@users.noreply.github.com> Date: Mon, 14 Nov 2022 12:49:55 +0530 Subject: [PATCH] feat: add resource name filtering in k8s probe (#598) * feat: add resource name filtering in k8s probe Signed-off-by: Soumya Ghosh Dastidar Signed-off-by: Rocio Roman --- .../azure-disk-loss/lib/azure-disk-loss.go | 2 +- .../node-memory-hog/lib/node-memory-hog.go | 2 +- go.mod | 2 +- go.sum | 4 +- pkg/clients/clientset.go | 2 +- pkg/probe/k8sprobe.go | 139 ++++++++++++++---- 6 files changed, 115 insertions(+), 36 deletions(-) diff --git a/chaoslib/litmus/azure-disk-loss/lib/azure-disk-loss.go b/chaoslib/litmus/azure-disk-loss/lib/azure-disk-loss.go index 867506603..a2d672bc0 100644 --- a/chaoslib/litmus/azure-disk-loss/lib/azure-disk-loss.go +++ b/chaoslib/litmus/azure-disk-loss/lib/azure-disk-loss.go @@ -275,7 +275,7 @@ func abortWatcher(experimentsDetails *experimentTypes.ExperimentDetails, attache } if diskStatusString != "Attached" { if err := diskStatus.AttachDisk(experimentsDetails.SubscriptionID, experimentsDetails.ResourceGroup, instanceName, experimentsDetails.ScaleSet, diskList); err != nil { - log.Errorf("failed to attach disk '%v, manual revert required, err: %v", err) + log.Errorf("failed to attach disk, manual revert required, err: %v", err) } else { common.SetTargets(*disk.Name, "re-attached", "VirtualDisk", chaosDetails) } diff --git a/chaoslib/litmus/node-memory-hog/lib/node-memory-hog.go b/chaoslib/litmus/node-memory-hog/lib/node-memory-hog.go index f2321cfb1..fb4691cd5 100644 --- a/chaoslib/litmus/node-memory-hog/lib/node-memory-hog.go +++ b/chaoslib/litmus/node-memory-hog/lib/node-memory-hog.go @@ -284,7 +284,7 @@ func calculateMemoryConsumption(experimentsDetails *experimentTypes.ExperimentDe //Get the percentage of memory under chaos wrt allocatable memory totalMemoryConsumption = int((float64(memoryForChaos) / float64(memoryAllocatable)) * 100) if 
totalMemoryConsumption > 100 { - log.Infof("[Info]: PercentageOfMemoryCapacity To Be Used: %d percent, which is more than 100 percent (%d percent) of Allocatable Memory, so the experiment will only consume upto 100 percent of Allocatable Memory", experimentsDetails.MemoryConsumptionPercentage, totalMemoryConsumption) + log.Infof("[Info]: PercentageOfMemoryCapacity To Be Used: %v percent, which is more than 100 percent (%d percent) of Allocatable Memory, so the experiment will only consume upto 100 percent of Allocatable Memory", experimentsDetails.MemoryConsumptionPercentage, totalMemoryConsumption) MemoryConsumption = "100%" } else { log.Infof("[Info]: PercentageOfMemoryCapacity To Be Used: %v percent, which is %d percent of Allocatable Memory", experimentsDetails.MemoryConsumptionPercentage, totalMemoryConsumption) diff --git a/go.mod b/go.mod index b73b59481..453a174d5 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/aws/aws-sdk-go v1.38.59 github.com/containerd/cgroups v1.0.1 github.com/kyokomi/emoji v2.2.4+incompatible - github.com/litmuschaos/chaos-operator v0.0.0-20220929101337-868b2827f820 + github.com/litmuschaos/chaos-operator v0.0.0-20221114055503-3d12d34d2032 github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.7.0 github.com/spf13/cobra v1.1.1 diff --git a/go.sum b/go.sum index f63cf932f..a0608613c 100644 --- a/go.sum +++ b/go.sum @@ -767,8 +767,8 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.0/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/litmuschaos/chaos-operator v0.0.0-20220929101337-868b2827f820 h1:xMlb6eMbWzdR/2IB6F095p0NDadccZIkiovJBE9fg9I= -github.com/litmuschaos/chaos-operator 
v0.0.0-20220929101337-868b2827f820/go.mod h1:CJGiHqC06PQkIBySk/JroB7B2zFebDbkhQ1A6ZbYmHA= +github.com/litmuschaos/chaos-operator v0.0.0-20221114055503-3d12d34d2032 h1:VeVpXvz5JVj28rQZs4DI101b+vVKHIKlUNWGfbDF6V0= +github.com/litmuschaos/chaos-operator v0.0.0-20221114055503-3d12d34d2032/go.mod h1:CJGiHqC06PQkIBySk/JroB7B2zFebDbkhQ1A6ZbYmHA= github.com/litmuschaos/elves v0.0.0-20201107015738-552d74669e3c/go.mod h1:DsbHGNUq/78NZozWVVI9Q6eBei4I+JjlkkD5aibJ3MQ= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= diff --git a/pkg/clients/clientset.go b/pkg/clients/clientset.go index 732038dc8..5cc252638 100644 --- a/pkg/clients/clientset.go +++ b/pkg/clients/clientset.go @@ -83,7 +83,7 @@ func buildConfigFromFlags(masterUrl, kubeconfigPath string) (*restclient.Config, if err == nil { return kubeconfig, nil } - klog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. Error creating inClusterConfig: ", err) + klog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. 
Error creating inClusterConfig: %v", err) } return clientcmd.NewNonInteractiveDeferredLoadingClientConfig( &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath}, diff --git a/pkg/probe/k8sprobe.go b/pkg/probe/k8sprobe.go index 443bbc809..c1079051f 100644 --- a/pkg/probe/k8sprobe.go +++ b/pkg/probe/k8sprobe.go @@ -2,6 +2,7 @@ package probe import ( "context" + k8serrors "k8s.io/apimachinery/pkg/api/errors" "strings" "time" @@ -42,7 +43,7 @@ func prepareK8sProbe(probe v1alpha1.ProbeAttributes, resultDetails *types.Result // triggerK8sProbe run the k8s probe command func triggerK8sProbe(probe v1alpha1.ProbeAttributes, clients clients.ClientSets, resultDetails *types.ResultDetails) error { - inputs := probe.K8sProbeInputs + inputs := &probe.K8sProbeInputs // It parse the templated command and return normal string // if command doesn't have template, it will return the same command @@ -56,6 +57,19 @@ func triggerK8sProbe(probe v1alpha1.ProbeAttributes, clients clients.ClientSets, return err } + inputs.ResourceNames, err = parseCommand(inputs.ResourceNames, resultDetails) + if err != nil { + return err + } + + parsedResourceNames := []string{} + if inputs.ResourceNames != "" { + parsedResourceNames = strings.Split(inputs.ResourceNames, ",") + for i := range parsedResourceNames { + parsedResourceNames[i] = strings.TrimSpace(parsedResourceNames[i]) + } + } + // it will retry for some retry count, in each iterations of try it contains following things // it contains a timeout per iteration of retry. 
if the timeout expires without success then it will go to next try // for a timeout, it will run the command, if it fails wait for the iterval and again execute the command until timeout expires @@ -77,32 +91,19 @@ func triggerK8sProbe(probe v1alpha1.ProbeAttributes, clients clients.ClientSets, return err } case "delete": - if err = deleteResource(probe, gvr, clients); err != nil { + if err = deleteResource(probe, gvr, parsedResourceNames, clients); err != nil { log.Errorf("the %v k8s probe has Failed, err: %v", probe.Name, err) return err } case "present": - resourceList, err := clients.DynamicClient.Resource(gvr).Namespace(inputs.Namespace).List(context.Background(), v1.ListOptions{ - FieldSelector: inputs.FieldSelector, - LabelSelector: inputs.LabelSelector, - }) - if err != nil { + if err = resourcesPresent(probe, gvr, parsedResourceNames, clients); err != nil { log.Errorf("the %v k8s probe has Failed, err: %v", probe.Name, err) - return errors.Errorf("unable to list the resources with matching selector, err: %v", err) - } else if len(resourceList.Items) == 0 { - return errors.Errorf("no resource found with provided selectors") + return err } case "absent": - resourceList, err := clients.DynamicClient.Resource(gvr).Namespace(inputs.Namespace).List(context.Background(), v1.ListOptions{ - FieldSelector: inputs.FieldSelector, - LabelSelector: inputs.LabelSelector, - }) - if err != nil { - return errors.Errorf("unable to list the resources with matching selector, err: %v", err) - } - if len(resourceList.Items) != 0 { + if err = resourcesAbsent(probe, gvr, parsedResourceNames, clients); err != nil { log.Errorf("the %v k8s probe has Failed, err: %v", probe.Name, err) - return errors.Errorf("resource is not deleted yet due to, err: %v", err) + return err } default: return errors.Errorf("operation type '%s' not supported in the k8s probe", inputs.Operation) @@ -165,21 +166,99 @@ func createResource(probe v1alpha1.ProbeAttributes, gvr schema.GroupVersionResou } // 
deleteResource deletes the resource with matching label & field selector -func deleteResource(probe v1alpha1.ProbeAttributes, gvr schema.GroupVersionResource, clients clients.ClientSets) error { - resourceList, err := clients.DynamicClient.Resource(gvr).Namespace(probe.K8sProbeInputs.Namespace).List(context.Background(), v1.ListOptions{ - FieldSelector: probe.K8sProbeInputs.FieldSelector, - LabelSelector: probe.K8sProbeInputs.LabelSelector, - }) - if err != nil { - return errors.Errorf("unable to list the resources with matching selector, err: %v", err) - } else if len(resourceList.Items) == 0 { - return errors.Errorf("no resource found with provided selectors") +func deleteResource(probe v1alpha1.ProbeAttributes, gvr schema.GroupVersionResource, parsedResourceNames []string, clients clients.ClientSets) error { + // resource name has higher priority + if len(parsedResourceNames) > 0 { + // check if all resources are available + if err := areResourcesWithNamePresent(probe, gvr, parsedResourceNames, clients); err != nil { + return err + } + // delete resources + for _, res := range parsedResourceNames { + if err := clients.DynamicClient.Resource(gvr).Namespace(probe.K8sProbeInputs.Namespace).Delete(context.Background(), res, v1.DeleteOptions{}); err != nil { + return err + } + } + } else { + resourceList, err := clients.DynamicClient.Resource(gvr).Namespace(probe.K8sProbeInputs.Namespace).List(context.Background(), v1.ListOptions{ + FieldSelector: probe.K8sProbeInputs.FieldSelector, + LabelSelector: probe.K8sProbeInputs.LabelSelector, + }) + if err != nil { + return errors.Errorf("unable to list the resources with matching selector, err: %v", err) + } else if len(resourceList.Items) == 0 { + return errors.Errorf("no resource found with provided selectors") + } + + for index := range resourceList.Items { + if err = clients.DynamicClient.Resource(gvr).Namespace(probe.K8sProbeInputs.Namespace).Delete(context.Background(), resourceList.Items[index].GetName(),
v1.DeleteOptions{}); err != nil { + return err + } + } } + return nil +} - for index := range resourceList.Items { - if err = clients.DynamicClient.Resource(gvr).Namespace(probe.K8sProbeInputs.Namespace).Delete(context.Background(), resourceList.Items[index].GetName(), v1.DeleteOptions{}); err != nil { +func resourcesPresent(probe v1alpha1.ProbeAttributes, gvr schema.GroupVersionResource, parsedResourceNames []string, clients clients.ClientSets) error { + // resource name has higher priority + if len(parsedResourceNames) > 0 { + // check if all resources are available + if err := areResourcesWithNamePresent(probe, gvr, parsedResourceNames, clients); err != nil { return err } + } else { + resourceList, err := clients.DynamicClient.Resource(gvr).Namespace(probe.K8sProbeInputs.Namespace).List(context.Background(), v1.ListOptions{ + FieldSelector: probe.K8sProbeInputs.FieldSelector, + LabelSelector: probe.K8sProbeInputs.LabelSelector, + }) + if err != nil { + log.Errorf("the %v k8s probe has Failed, err: %v", probe.Name, err) + return errors.Errorf("unable to list the resources with matching selector, err: %v", err) + } else if len(resourceList.Items) == 0 { + return errors.Errorf("no resource found with provided selectors") + } + } + return nil +} + +func areResourcesWithNamePresent(probe v1alpha1.ProbeAttributes, gvr schema.GroupVersionResource, parsedResourceNames []string, clients clients.ClientSets) error { + for _, res := range parsedResourceNames { + resource, err := clients.DynamicClient.Resource(gvr).Namespace(probe.K8sProbeInputs.Namespace).Get(context.Background(), res, v1.GetOptions{}) + if err != nil { + return errors.Errorf("unable to get the resources with name %v, err: %v", res, err) + } else if resource == nil { + return errors.Errorf("unable to get the resources with name %v", res) + } + } + return nil +} + +func resourcesAbsent(probe v1alpha1.ProbeAttributes, gvr schema.GroupVersionResource, parsedResourceNames []string, clients clients.ClientSets) 
error { + // resource name has higher priority + if len(parsedResourceNames) > 0 { + // check if all resources are absent + for _, res := range parsedResourceNames { + resource, err := clients.DynamicClient.Resource(gvr).Namespace(probe.K8sProbeInputs.Namespace).Get(context.Background(), res, v1.GetOptions{}) + if err != nil { + // ignore not found error, that is the expected outcome + if !k8serrors.IsNotFound(err) { + return errors.Errorf("unable to get the resources with name %v from k8s, err: %v", res, err) + } + } else if resource != nil { + return errors.Errorf("resource '%v' still exists but is expected to be absent", res) + } + } + } else { + resourceList, err := clients.DynamicClient.Resource(gvr).Namespace(probe.K8sProbeInputs.Namespace).List(context.Background(), v1.ListOptions{ + FieldSelector: probe.K8sProbeInputs.FieldSelector, + LabelSelector: probe.K8sProbeInputs.LabelSelector, + }) + if err != nil { + return errors.Errorf("unable to list the resources with matching selector, err: %v", err) + } + if len(resourceList.Items) != 0 { + return errors.Errorf("resource with provided selectors still exists, found %v resources with matching selectors", len(resourceList.Items)) + } } return nil }