diff --git a/faults/aws/experiments.yaml b/faults/aws/experiments.yaml
index 1df62974e..697393c1c 100644
--- a/faults/aws/experiments.yaml
+++ b/faults/aws/experiments.yaml
@@ -2,12 +2,12 @@
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Detaching an EBS volume from EC2 instance for a certain chaos duration.
+    Stopping an EC2 instance identified by tag.
 kind: ChaosExperiment
 metadata:
-  name: ebs-loss-by-id
+  name: ec2-stop-by-tag
   labels:
-    name: ebs-loss-by-id
+    name: ec2-stop-by-tag
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -52,11 +52,15 @@ spec:
       - apiGroups: ["litmuschaos.io"]
         resources: ["chaosengines", "chaosexperiments", "chaosresults"]
         verbs: ["create", "list", "get", "patch", "update", "delete"]
+      # for experiment to perform node status checks
+      - apiGroups: [""]
+        resources: ["nodes"]
+        verbs: ["get", "list"]
     image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name ebs-loss-by-id
+      - ./experiments -name ec2-terminate-by-tag
     command:
       - /bin/bash
     env:
@@ -70,12 +74,21 @@ spec:
       - name: RAMP_TIME
         value: ""
 
-      - name: EBS_VOLUME_ID
+      # Provide a common tag to target ec2 instances
+      - name: EC2_INSTANCE_TAG
         value: ""
 
+      # enable it if the target instance is a part of self-managed nodegroup.
+      - name: MANAGED_NODEGROUP
+        value: "disable"
+
       - name: REGION
         value: ""
 
+      # Target the percentage of instances filtered by tag
+      - name: INSTANCE_AFFECTED_PERC
+        value: ""
+
       - name: SEQUENCE
         value: "parallel"
 
@@ -86,8 +99,8 @@ spec:
       - name: AWS_SHARED_CREDENTIALS_FILE
         value: "/tmp/cloud_config.yml"
 
     labels:
-      name: ebs-loss-by-id
+      name: ec2-stop-by-tag
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
@@ -100,12 +116,12 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Execute AWS SSM Chaos on given EC2 instance Tag
+    Detaching an EBS volume from EC2 instance.
 kind: ChaosExperiment
 metadata:
-  name: aws-ssm-chaos-by-tag
+  name: ebs-loss-by-tag
   labels:
-    name: aws-ssm-chaos-by-tag
+    name: ebs-loss-by-tag
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -154,83 +170,40 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name aws-ssm-chaos-by-tag
+      - ./experiments -name ebs-loss-by-tag
     command:
       - /bin/bash
     env:
       - name: TOTAL_CHAOS_DURATION
-        value: "60"
+        value: "30"
 
       - name: CHAOS_INTERVAL
-        value: "60"
+        value: "30"
 
-      # Period to wait before and after injection of chaos in sec
       - name: RAMP_TIME
         value: ""
 
-        # provide tag of the target EC2 instances
-        # ex: team:devops (key:value)
-      - name: EC2_INSTANCE_TAG
+      - name: EBS_VOLUME_TAG
         value: ""
 
       - name: REGION
         value: ""
 
-      # it defines the sequence of chaos execution for multiple target instances
-      # supported values: serial, parallel
       - name: SEQUENCE
         value: "parallel"
 
-      # Provide the path of AWS credentials mounted from secret
-      - name: AWS_SHARED_CREDENTIALS_FILE
-        value: "/tmp/cloud_config.yml"
-
-      # percentage of total instance to target
-      - name: INSTANCE_AFFECTED_PERC
+      - name: VOLUME_AFFECTED_PERC
         value: ""
 
-      # provide the number of workers for memory stress
-      - name: NUMBER_OF_WORKERS
-        value: "1"
-
-      # provide the percentage of available memory to stress
-      - name: MEMORY_PERCENTAGE
-        value: "80"
-
-      # provide the CPU chores to comsumed
-      # 0 will consume all the available CPU cores
-      - name: CPU_CORE
-        value: "0"
-
       - name: DEFAULT_HEALTH_CHECK
         value: "false"
 
-      # Provide the name of SSM doc
-      # if not using the default stress docs
-      - name: DOCUMENT_NAME
-        value: "LitmusChaos-AWS-SSM-Doc"
-
-      # Provide the type of SSM doc
-      # if not using the default stress docs
-      - name: DOCUMENT_TYPE
-        value: "Command"
-
-      # Provide the format of SSM doc
-      # if not using the default stress docs
-      - name: DOCUMENT_FORMAT
-        value: "YAML"
-
-      # Provide the path of SSM doc
-      # if not using the default stress docs
-      - name: DOCUMENT_PATH
-        value: "Litmus-AWS-SSM-Docs-For-EC2-CPU-Hog.yml"
-
-        # if you want to install dependencies to run default SSM docs
-      - name: INSTALL_DEPENDENCIES
-        value: "True"
+      # Provide the path of AWS credentials mounted from secret
+      - name: AWS_SHARED_CREDENTIALS_FILE
+        value: "/tmp/cloud_config.yml"
 
     labels:
-      name: aws-ssm-chaos-by-tag
+      name: ebs-loss-by-tag
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
@@ -243,12 +216,12 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Stopping an EC2 instance identified by tag.
+    Execute AWS SSM Chaos on given EC2 instance IDs
 kind: ChaosExperiment
 metadata:
-  name: ec2-stop-by-tag
+  name: aws-ssm-chaos-by-id
   labels:
-    name: ec2-stop-by-tag
+    name: aws-ssm-chaos-by-id
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -293,58 +266,83 @@ spec:
       - apiGroups: ["litmuschaos.io"]
         resources: ["chaosengines", "chaosexperiments", "chaosresults"]
         verbs: ["create", "list", "get", "patch", "update", "delete"]
-      # for experiment to perform node status checks
-      - apiGroups: [""]
-        resources: ["nodes"]
-        verbs: ["get", "list"]
     image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name ec2-terminate-by-tag
+      - ./experiments -name aws-ssm-chaos-by-id
     command:
       - /bin/bash
     env:
       - name: TOTAL_CHAOS_DURATION
-        value: "30"
+        value: "60"
 
       - name: CHAOS_INTERVAL
-        value: "30"
+        value: "60"
 
       # Period to wait before and after injection of chaos in sec
       - name: RAMP_TIME
         value: ""
 
-      # Provide a common tag to target ec2 instances
-      - name: EC2_INSTANCE_TAG
+      # Instance ID of the target EC2 instance
+      # Multiple IDs can also be provided as comma separated values ex: id1,id2
+      - name: EC2_INSTANCE_ID
         value: ""
 
-      # enable it if the target instance is a part of self-managed nodegroup.
-      - name: MANAGED_NODEGROUP
-        value: "disable"
-
       - name: REGION
         value: ""
 
-      # Target the percentage of instance filtered from tag
-      - name: INSTANCE_AFFECTED_PERC
-        value: ""
-
+      # it defines the sequence of chaos execution for multiple target instances
+      # supported values: serial, parallel
       - name: SEQUENCE
         value: "parallel"
 
-      - name: DEFAULT_HEALTH_CHECK
-        value: "false"
-
       # Provide the path of AWS credentials mounted from secret
       - name: AWS_SHARED_CREDENTIALS_FILE
         value: "/tmp/cloud_config.yml"
 
-      - name: SEQUENCE
-        value: "parallel"
+      # provide the number of workers for memory stress
+      - name: NUMBER_OF_WORKERS
+        value: "1"
+
+      # provide the percentage of available memory to stress
+      - name: MEMORY_PERCENTAGE
+        value: "80"
+
+      # provide the CPU cores to be consumed
+      # 0 will consume all the available CPU cores
+      - name: CPU_CORE
+        value: "0"
+
+      - name: DEFAULT_HEALTH_CHECK
+        value: "false"
+
+      # Provide the name of SSM doc
+      # if not using the default stress docs
+      - name: DOCUMENT_NAME
+        value: "LitmusChaos-AWS-SSM-Doc"
+
+      # Provide the type of SSM doc
+      # if not using the default stress docs
+      - name: DOCUMENT_TYPE
+        value: "Command"
+
+      # Provide the format of SSM doc
+      # if not using the default stress docs
+      - name: DOCUMENT_FORMAT
+        value: "YAML"
+
+      # Provide the path of SSM doc
+      # if not using the default stress docs
+      - name: DOCUMENT_PATH
+        value: "Litmus-AWS-SSM-Docs-For-EC2-CPU-Hog.yml"
+
+        # if you want to install dependencies to run default SSM docs
+      - name: INSTALL_DEPENDENCIES
+        value: "True"
 
     labels:
-      name: ec2-stop-by-tag
+      name: aws-ssm-chaos-by-id
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
@@ -357,12 +355,12 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Execute AWS SSM Chaos on given EC2 instance IDs
+    Execute AWS SSM Chaos on given EC2 instance Tag
 kind: ChaosExperiment
 metadata:
-  name: aws-ssm-chaos-by-id
+  name: aws-ssm-chaos-by-tag
   labels:
-    name: aws-ssm-chaos-by-id
+    name: aws-ssm-chaos-by-tag
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -411,7 +409,7 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name aws-ssm-chaos-by-id
+      - ./experiments -name aws-ssm-chaos-by-tag
     command:
       - /bin/bash
     env:
@@ -425,9 +423,9 @@ spec:
       - name: RAMP_TIME
         value: ""
 
-      # Instance ID of the target EC2 instance
-      # Multiple IDs can also be provided as comma separated values ex: id1,id2
-      - name: EC2_INSTANCE_ID
+        # provide tag of the target EC2 instances
+        # ex: team:devops (key:value)
+      - name: EC2_INSTANCE_TAG
         value: ""
 
       - name: REGION
@@ -442,6 +440,10 @@ spec:
       - name: AWS_SHARED_CREDENTIALS_FILE
         value: "/tmp/cloud_config.yml"
 
+      # percentage of total instances to target
+      - name: INSTANCE_AFFECTED_PERC
+        value: ""
+
       # provide the number of workers for memory stress
       - name: NUMBER_OF_WORKERS
         value: "1"
@@ -450,7 +452,7 @@ spec:
       - name: MEMORY_PERCENTAGE
         value: "80"
 
-      # provide the CPU chores to be consumed
+      # provide the CPU cores to be consumed
       # 0 will consume all the available CPU cores
       - name: CPU_CORE
         value: "0"
@@ -483,7 +485,7 @@ spec:
         value: "True"
 
     labels:
-      name: aws-ssm-chaos-by-id
+      name: aws-ssm-chaos-by-tag
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
@@ -496,73 +498,102 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    It injects AZ chaos to detach the target zones from the load balancer.
+    Stopping an EC2 instance identified by ID.
 kind: ChaosExperiment
 metadata:
-  name: aws-az-chaos
+  name: ec2-stop-by-id
   labels:
-    name: aws-az-chaos
+    name: ec2-stop-by-id
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
 spec:
   definition:
-    scope: Namespaced
+    scope: Cluster
     permissions:
       # Create and monitor the experiment & helper pods
       - apiGroups: [""]
         resources: ["pods"]
-        verbs: ["create","delete","get","list","patch","update", "deletecollection"]
+        verbs:
+          [
+            "create",
+            "delete",
+            "get",
+            "list",
+            "patch",
+            "update",
+            "deletecollection",
+          ]
       # Performs CRUD operations on the events inside chaosengine and chaosresult
       - apiGroups: [""]
         resources: ["events"]
-        verbs: ["create","get","list","patch","update"]
+        verbs: ["create", "get", "list", "patch", "update"]
       # Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
       - apiGroups: [""]
-        resources: ["secrets","configmaps"]
-        verbs: ["get","list",]
+        resources: ["secrets", "configmaps"]
+        verbs: ["get", "list"]
       # Track and get the runner, experiment, and helper pods log
       - apiGroups: [""]
         resources: ["pods/log"]
-        verbs: ["get","list","watch"]
-      # for creating and managing to execute comands inside target container
+        verbs: ["get", "list", "watch"]
+      # for creating and managing to execute commands inside target container
       - apiGroups: [""]
         resources: ["pods/exec"]
-        verbs: ["get","list","create"]
+        verbs: ["get", "list", "create"]
       # for configuring and monitor the experiment job by the chaos-runner pod
       - apiGroups: ["batch"]
         resources: ["jobs"]
-        verbs: ["create","list","get","delete","deletecollection"]
+        verbs: ["create", "list", "get", "delete", "deletecollection"]
       # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
       - apiGroups: ["litmuschaos.io"]
-        resources: ["chaosengines","chaosexperiments","chaosresults"]
-        verbs: ["create","list","get","patch","update","delete"]
-    image: "litmuschaos.docker.scarf.sh/litmuschaos/py-runner:3.13.0"
+        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
+        verbs: ["create", "list", "get", "patch", "update", "delete"]
+      # for experiment to perform node status checks
+      - apiGroups: [""]
+        resources: ["nodes"]
+        verbs: ["get", "list"]
+    image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
     imagePullPolicy: Always
     args:
       - -c
-      - python3 -u experiment -name aws-az-chaos
+      - ./experiments -name ec2-terminate-by-id
     command:
       - /bin/bash
     env:
       - name: TOTAL_CHAOS_DURATION
-        value: '30'
+        value: "30"
+
       - name: CHAOS_INTERVAL
-        value: '30'
-      - name: LIB
-        value: 'litmus'
-      - name: LOAD_BALANCER_NAME
-        value: ''
-      - name: LOAD_BALANCER_ZONES
-        value: ''
-      - name: LOAD_BALANCERNAME_ARN
-        value: 'na'
+        value: "30"
+
+      # Period to wait before and after injection of chaos in sec
+      - name: RAMP_TIME
+        value: ""
+
+      - name: DEFAULT_HEALTH_CHECK
+        value: "false"
+
+      # enable it if the target instance is a part of self-managed nodegroup.
+      - name: MANAGED_NODEGROUP
+        value: "disable"
+
+      # Instance ID of the target EC2 instance
+      # Multiple IDs can also be provided as comma separated values ex: id1,id2
+      - name: EC2_INSTANCE_ID
+        value: ""
+
+      - name: REGION
+        value: ""
+
+      - name: SEQUENCE
+        value: "parallel"
+
+      # Provide the path of AWS credentials mounted from secret
       - name: AWS_SHARED_CREDENTIALS_FILE
         value: "/tmp/cloud_config.yml"
-      - name: RAMP_TIME
-        value: ''
+
     labels:
-      name: aws-az-chaos
+      name: ec2-stop-by-id
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
@@ -575,94 +606,73 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Detaching an EBS volume from EC2 instance.
+    It injects AZ chaos to detach the target zones from the load balancer.
 kind: ChaosExperiment
 metadata:
-  name: ebs-loss-by-tag
+  name: aws-az-chaos
   labels:
-    name: ebs-loss-by-tag
+    name: aws-az-chaos
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
 spec:
   definition:
-    scope: Cluster
+    scope: Namespaced
     permissions:
       # Create and monitor the experiment & helper pods
       - apiGroups: [""]
         resources: ["pods"]
-        verbs:
-          [
-            "create",
-            "delete",
-            "get",
-            "list",
-            "patch",
-            "update",
-            "deletecollection",
-          ]
+        verbs: ["create","delete","get","list","patch","update", "deletecollection"]
       # Performs CRUD operations on the events inside chaosengine and chaosresult
       - apiGroups: [""]
         resources: ["events"]
-        verbs: ["create", "get", "list", "patch", "update"]
+        verbs: ["create","get","list","patch","update"]
       # Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
       - apiGroups: [""]
-        resources: ["secrets", "configmaps"]
-        verbs: ["get", "list"]
+        resources: ["secrets","configmaps"]
+        verbs: ["get","list",]
       # Track and get the runner, experiment, and helper pods log
       - apiGroups: [""]
         resources: ["pods/log"]
-        verbs: ["get", "list", "watch"]
-      # for creating and managing to execute commands inside target container
+        verbs: ["get","list","watch"]
+      # for creating and managing to execute commands inside target container
       - apiGroups: [""]
         resources: ["pods/exec"]
-        verbs: ["get", "list", "create"]
+        verbs: ["get","list","create"]
       # for configuring and monitor the experiment job by the chaos-runner pod
       - apiGroups: ["batch"]
         resources: ["jobs"]
-        verbs: ["create", "list", "get", "delete", "deletecollection"]
+        verbs: ["create","list","get","delete","deletecollection"]
       # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
       - apiGroups: ["litmuschaos.io"]
-        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
-        verbs: ["create", "list", "get", "patch", "update", "delete"]
-    image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
+        resources: ["chaosengines","chaosexperiments","chaosresults"]
+        verbs: ["create","list","get","patch","update","delete"]
+    image: "litmuschaos.docker.scarf.sh/litmuschaos/py-runner:3.13.0"
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name ebs-loss-by-tag
+      - python3 -u experiment -name aws-az-chaos
     command:
       - /bin/bash
     env:
       - name: TOTAL_CHAOS_DURATION
-        value: "30"
-
+        value: '30'
       - name: CHAOS_INTERVAL
-        value: "30"
-
-      - name: RAMP_TIME
-        value: ""
-
-      - name: EBS_VOLUME_TAG
-        value: ""
-
-      - name: REGION
-        value: ""
-
-      - name: SEQUENCE
-        value: "parallel"
-
-      - name: VOLUME_AFFECTED_PERC
-        value: ""
-
-      - name: DEFAULT_HEALTH_CHECK
-        value: "false"
-
-      # Provide the path of AWS credentials mounted from secret
+        value: '30'
+      - name: LIB
+        value: 'litmus'
+      - name: LOAD_BALANCER_NAME
+        value: ''
+      - name: LOAD_BALANCER_ZONES
+        value: ''
+      - name: LOAD_BALANCERNAME_ARN
+        value: 'na'
       - name: AWS_SHARED_CREDENTIALS_FILE
         value: "/tmp/cloud_config.yml"
-
+      - name: RAMP_TIME
+        value: ''
     labels:
-      name: ebs-loss-by-tag
+      name: aws-az-chaos
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
@@ -675,12 +685,12 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Stopping an EC2 instance identified by ID.
+    Detaching an EBS volume from EC2 instance for a certain chaos duration.
 kind: ChaosExperiment
 metadata:
-  name: ec2-stop-by-id
+  name: ebs-loss-by-id
   labels:
-    name: ec2-stop-by-id
+    name: ebs-loss-by-id
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -725,15 +735,11 @@ spec:
       - apiGroups: ["litmuschaos.io"]
         resources: ["chaosengines", "chaosexperiments", "chaosresults"]
         verbs: ["create", "list", "get", "patch", "update", "delete"]
-      # for experiment to perform node status checks
-      - apiGroups: [""]
-        resources: ["nodes"]
-        verbs: ["get", "list"]
     image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name ec2-terminate-by-id
+      - ./experiments -name ebs-loss-by-id
     command:
       - /bin/bash
     env:
@@ -747,16 +753,7 @@ spec:
       - name: RAMP_TIME
         value: ""
 
-      - name: DEFAULT_HEALTH_CHECK
-        value: "false"
-
-      # enable it if the target instance is a part of self-managed nodegroup.
-      - name: MANAGED_NODEGROUP
-        value: "disable"
-
-      # Instance ID of the target EC2 instance
-      # Multiple IDs can also be provided as comma separated values ex: id1,id2
-      - name: EC2_INSTANCE_ID
+      - name: EBS_VOLUME_ID
         value: ""
 
       - name: REGION
@@ -765,12 +762,15 @@ spec:
       - name: SEQUENCE
         value: "parallel"
 
+      - name: DEFAULT_HEALTH_CHECK
+        value: "false"
+
       # Provide the path of AWS credentials mounted from secret
       - name: AWS_SHARED_CREDENTIALS_FILE
         value: "/tmp/cloud_config.yml"
 
     labels:
-      name: ec2-stop-by-id
+      name: ebs-loss-by-id
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
diff --git a/faults/gcp/experiments.yaml b/faults/gcp/experiments.yaml
index 1fc0b50a1..20d4862c8 100644
--- a/faults/gcp/experiments.yaml
+++ b/faults/gcp/experiments.yaml
@@ -110,12 +110,12 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Causes loss of a non-boot storage persistent disk from a GCP VM instance filtered by a label for a specified duration before attaching them back
+    Stops GCP VM instances and GKE nodes for a specified duration and later restarts them
 kind: ChaosExperiment
 metadata:
-  name: gcp-vm-disk-loss-by-label
+  name: gcp-vm-instance-stop
   labels:
-    name: gcp-vm-disk-loss-by-label
+    name: gcp-vm-instance-stop
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -156,48 +156,56 @@ spec:
       - apiGroups: ["litmuschaos.io"]
         resources: ["chaosengines", "chaosexperiments", "chaosresults"]
         verbs: ["create", "list", "get", "patch", "update", "delete"]
+      # for experiment to perform node status checks
+      - apiGroups: [""]
+        resources: ["nodes"]
+        verbs: ["get", "list"]
     image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name gcp-vm-disk-loss-by-label
+      - ./experiments -name gcp-vm-instance-stop
     command:
       - /bin/bash
     env:
-      # set chaos duration (in sec) as desired
       - name: TOTAL_CHAOS_DURATION
         value: "30"
 
-      # set chaos interval (in sec) as desired
       - name: CHAOS_INTERVAL
         value: "30"
 
-      # set the GCP project id
-      - name: GCP_PROJECT_ID
+      # period to wait before and after injection of chaos in sec
+      - name: RAMP_TIME
         value: ""
 
-      # set the zone in which all the disks are created
-      # all the disks must exist in the same zone
-      - name: ZONES
+      # enable or disable; shall be set to enable if the target instances are a part of a managed instance group
+      - name: MANAGED_INSTANCE_GROUP
+        value: "disable"
+
+      # Instance name of the target VM instance(s)
+      # Multiple instance names can be provided as comma separated values ex: instance1,instance2
+      - name: VM_INSTANCE_NAMES
         value: ""
 
-      # set the label of the target disk volumes
-      - name: DISK_VOLUME_LABEL
+      # GCP project ID to which the VM instances belong
+      - name: GCP_PROJECT_ID
         value: ""
 
-      # set the percentage value of the disks with the given label
-      # which should be targeted as part of the chaos injection
-      - name: DISK_AFFECTED_PERC
+      # Instance zone(s) of the target VM instance(s)
+      # If more than one instance is targeted, provide zone for each in the order of their
+      # respective instance names in VM_INSTANCE_NAMES as comma separated values ex: zone1,zone2
+      - name: ZONES
         value: ""
 
       - name: DEFAULT_HEALTH_CHECK
         value: "false"
 
+      # parallel or serial; determines how chaos is injected
       - name: SEQUENCE
         value: "parallel"
 
     labels:
-      name: gcp-vm-disk-loss-by-label
+      name: gcp-vm-instance-stop
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
@@ -210,12 +222,12 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Causes loss of a non-boot storage persistent disk from a GCP VM instance for a specified duration before attaching them back
+    Causes loss of a non-boot storage persistent disk from a GCP VM instance filtered by a label for a specified duration before attaching them back
 kind: ChaosExperiment
 metadata:
-  name: gcp-vm-disk-loss
+  name: gcp-vm-disk-loss-by-label
   labels:
-    name: gcp-vm-disk-loss
+    name: gcp-vm-disk-loss-by-label
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -260,48 +272,44 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name gcp-vm-disk-loss
+      - ./experiments -name gcp-vm-disk-loss-by-label
     command:
       - /bin/bash
     env:
+      # set chaos duration (in sec) as desired
       - name: TOTAL_CHAOS_DURATION
         value: "30"
 
+      # set chaos interval (in sec) as desired
       - name: CHAOS_INTERVAL
         value: "30"
 
-      # Period to wait before and after injection of chaos in sec
-      - name: RAMP_TIME
-        value: ""
-
-      # parallel or serial; determines how chaos is injected
-      - name: SEQUENCE
-        value: "parallel"
-
       # set the GCP project id
       - name: GCP_PROJECT_ID
         value: ""
 
-      # set the disk volume name(s) as comma seperated values
-      # eg. volume1,volume2,...
-      - name: DISK_VOLUME_NAMES
+      # set the zone in which all the disks are created
+      # all the disks must exist in the same zone
+      - name: ZONES
         value: ""
 
-      # set the disk zone(s) as comma seperated values in the corresponding
-      # order of DISK_VOLUME_NAME
-      # eg. zone1,zone2,...
-      - name: ZONES
+      # set the label of the target disk volumes
+      - name: DISK_VOLUME_LABEL
+        value: ""
+
+      # set the percentage value of the disks with the given label
+      # which should be targeted as part of the chaos injection
+      - name: DISK_AFFECTED_PERC
         value: ""
 
       - name: DEFAULT_HEALTH_CHECK
         value: "false"
 
-      # parallel or serial; determines how chaos is injected
       - name: SEQUENCE
         value: "parallel"
 
     labels:
-      name: gcp-vm-disk-loss
+      name: gcp-vm-disk-loss-by-label
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
@@ -314,12 +322,12 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Stops GCP VM instances and GKE nodes for a specified duration and later restarts them
+    Causes loss of a non-boot storage persistent disk from a GCP VM instance for a specified duration before attaching them back
 kind: ChaosExperiment
 metadata:
-  name: gcp-vm-instance-stop
+  name: gcp-vm-disk-loss
   labels:
-    name: gcp-vm-instance-stop
+    name: gcp-vm-disk-loss
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -360,15 +368,11 @@ spec:
       - apiGroups: ["litmuschaos.io"]
         resources: ["chaosengines", "chaosexperiments", "chaosresults"]
         verbs: ["create", "list", "get", "patch", "update", "delete"]
-      # for experiment to perform node status checks
-      - apiGroups: [""]
-        resources: ["nodes"]
-        verbs: ["get", "list"]
     image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name gcp-vm-instance-stop
+      - ./experiments -name gcp-vm-disk-loss
     command:
       - /bin/bash
     env:
@@ -378,30 +382,26 @@ spec:
       - name: CHAOS_INTERVAL
         value: "30"
 
-      # parallel or serial; determines how the VM instances are terminated, all at once or one at a time
-      - name: SEQUENCE
-        value: "parallel"
-
-      # period to wait before and after injection of chaos in sec
+      # Period to wait before and after injection of chaos in sec
       - name: RAMP_TIME
         value: ""
 
-      # enable or disable; shall be set to enable if the target instances are a part of a managed instance group
-      - name: MANAGED_INSTANCE_GROUP
-        value: "disable"
+      # parallel or serial; determines how chaos is injected
+      - name: SEQUENCE
+        value: "parallel"
 
-      # Instance name of the target VM instance(s)
-      # Multiple instance names can be provided as comma separated values ex: instance1,instance2
-      - name: VM_INSTANCE_NAMES
+      # set the GCP project id
+      - name: GCP_PROJECT_ID
         value: ""
 
-      # GCP project ID to which the VM instances belong
-      - name: GCP_PROJECT_ID
+      # set the disk volume name(s) as comma separated values
+      # eg. volume1,volume2,...
+      - name: DISK_VOLUME_NAMES
         value: ""
 
-      # Instance zone(s) of the target VM instance(s)
-      # If more than one instance is targetted, provide zone for each in the order of their
-      # respective instance name in VM_INSTANCE_NAME as comma separated values ex: zone1,zone2
+      # set the disk zone(s) as comma separated values in the corresponding
+      # order of DISK_VOLUME_NAMES
+      # eg. zone1,zone2,...
       - name: ZONES
         value: ""
 
@@ -413,7 +413,7 @@ spec:
         value: "parallel"
 
     labels:
-      name: gcp-vm-instance-stop
+      name: gcp-vm-disk-loss
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
diff --git a/faults/kubernetes/experiments.yaml b/faults/kubernetes/experiments.yaml
index e29be008b..d35a4316e 100644
--- a/faults/kubernetes/experiments.yaml
+++ b/faults/kubernetes/experiments.yaml
@@ -1,13 +1,12 @@
 ---
 apiVersion: litmuschaos.io/v1alpha1
 description:
-  message: |
-    Pod DNS Spoof can spoof particular DNS requests in target pod container to desired target hostnames
+  message: "Kills a container belonging to an application pod \n"
 kind: ChaosExperiment
 metadata:
-  name: pod-dns-spoof
+  name: container-kill
   labels:
-    name: pod-dns-spoof
+    name: container-kill
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -69,55 +68,64 @@ spec:
         resources: ["chaosengines", "chaosexperiments", "chaosresults"]
         verbs: ["create", "list", "get", "patch", "update", "delete"]
     image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
+    imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name pod-dns-spoof
+      - ./experiments -name container-kill
     command:
       - /bin/bash
     env:
       - name: TARGET_CONTAINER
         value: ""
 
-      # provide lib image
-      - name: LIB_IMAGE
-        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
+      # Period to wait before and after injection of chaos in sec
+      - name: RAMP_TIME
+        value: ""
 
-      - name: DEFAULT_HEALTH_CHECK
-        value: "false"
+      - name: TARGET_PODS
+        value: ""
 
-      - name: TOTAL_CHAOS_DURATION
-        value: "60" # in seconds
+      # provide the chaos interval
+      - name: CHAOS_INTERVAL
+        value: "10"
 
-      # Time period to wait before and after injection of chaos in sec
-      - name: RAMP_TIME
-        value: ""
+      - name: SIGNAL
+        value: "SIGKILL"
+
+      # provide the socket file path
+      - name: SOCKET_PATH
+        value: "/run/containerd/containerd.sock"
+
+      # provide the name of container runtime
+      # it supports docker, containerd, crio
+      - name: CONTAINER_RUNTIME
+        value: "containerd"
+
+      # provide the total chaos duration
+      - name: TOTAL_CHAOS_DURATION
+        value: "20"
 
       ## percentage of total pods to target
       - name: PODS_AFFECTED_PERC
         value: ""
 
-      - name: TARGET_PODS
+      # To select pods on specific node(s)
+      - name: NODE_LABEL
         value: ""
 
-      # provide the name of container runtime, it supports docker, containerd, crio
-      - name: CONTAINER_RUNTIME
-        value: "containerd"
+      - name: DEFAULT_HEALTH_CHECK
+        value: "false"
 
-      # provide the socket file path
-      - name: SOCKET_PATH
-        value: "/run/containerd/containerd.sock"
+      - name: LIB_IMAGE
+        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
 
       ## it defines the sequence of chaos execution for multiple target pods
       ## supported values: serial, parallel
       - name: SEQUENCE
         value: "parallel"
 
-      # map of the target hostnames eg. '{"abc.com":"spoofabc.com"}' . If empty no queries will be spoofed
-      - name: SPOOF_MAP
-        value: ""
-
     labels:
-      experiment: pod-dns-spoof
+      name: container-kill
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/runtime-api-usage: "true"
@@ -128,18 +136,18 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Give a memory hog on a node belonging to a deployment
+    Injects memory consumption on pods belonging to an app deployment
 kind: ChaosExperiment
 metadata:
-  name: node-memory-hog
+  name: pod-memory-hog
   labels:
-    name: node-memory-hog
+    name: pod-memory-hog
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
 spec:
   definition:
-    scope: Cluster
+    scope: Namespaced
     permissions:
       # Create and monitor the experiment & helper pods
       - apiGroups: [""]
@@ -170,6 +178,22 @@ spec:
       - apiGroups: [""]
         resources: ["pods/exec"]
         verbs: ["get", "list", "create"]
+      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
+      - apiGroups: ["apps"]
+        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
+        verbs: ["list", "get"]
+      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
+      - apiGroups: ["apps.openshift.io"]
+        resources: ["deploymentconfigs"]
+        verbs: ["list", "get"]
+      # deriving the parent/owner details of the pod(if parent is replicationController)
+      - apiGroups: [""]
+        resources: ["replicationcontrollers"]
+        verbs: ["get", "list"]
+      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
+      - apiGroups: ["argoproj.io"]
+        resources: ["rollouts"]
+        verbs: ["list", "get"]
       # for configuring and monitor the experiment job by the chaos-runner pod
       - apiGroups: ["batch"]
         resources: ["jobs"]
@@ -178,178 +202,72 @@ spec:
       - apiGroups: ["litmuschaos.io"]
         resources: ["chaosengines", "chaosexperiments", "chaosresults"]
         verbs: ["create", "list", "get", "patch", "update", "delete"]
-      # for experiment to perform node status checks
-      - apiGroups: [""]
-        resources: ["nodes"]
-        verbs: ["get", "list"]
     image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name node-memory-hog
+      - ./experiments -name pod-memory-hog
     command:
       - /bin/bash
     env:
       - name: TOTAL_CHAOS_DURATION
-        value: "120"
-
-      ## Specify the size as percent of total node capacity Ex: '30'
-      ## NOTE: for selecting this option keep MEMORY_CONSUMPTION_MEBIBYTES empty
-      - name: MEMORY_CONSUMPTION_PERCENTAGE
-        value: ""
+        value: "60"
 
-      ## Specify the amount of memory to be consumed in mebibytes
-      ## NOTE: for selecting this option keep MEMORY_CONSUMPTION_PERCENTAGE empty
-      - name: MEMORY_CONSUMPTION_MEBIBYTES
-        value: ""
+      ## enter the amount of memory in megabytes to be consumed by the application pod
+      - name: MEMORY_CONSUMPTION
+        value: "500"
 
+      ## Number of workers to perform stress
       - name: NUMBER_OF_WORKERS
         value: "1"
 
-      # ENTER THE COMMA SEPARATED TARGET NODES NAME
-      - name: TARGET_NODES
-        value: ""
-
-      - name: NODE_LABEL
+      ## percentage of total pods to target
+      - name: PODS_AFFECTED_PERC
         value: ""
 
-      # Period to wait before and after injection of chaos in sec
+      ## Period to wait before and after injection of chaos in sec
       - name: RAMP_TIME
         value: ""
 
-      # provide lib image
+      ## It is used in pumba lib only
       - name: LIB_IMAGE
         value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
 
-      - name: DEFAULT_HEALTH_CHECK
-        value: "false"
+      ## It is used in pumba lib only
+      - name: STRESS_IMAGE
+        value: "alexeiled/stress-ng:latest-ubuntu"
 
-      ## percentage of total nodes to target
-      - name: NODES_AFFECTED_PERC
-        value: ""
+      ## provide the cluster runtime
+      - name: CONTAINER_RUNTIME
+        value: "containerd"
 
-      ## it defines the sequence of chaos execution for multiple target nodes
+      # provide the socket file path
+      - name: SOCKET_PATH
+        value: "/run/containerd/containerd.sock"
+
+      ## it defines the sequence of chaos execution for multiple target pods
       ## supported values: serial, parallel
       - name: SEQUENCE
         value: "parallel"
 
-    labels:
-      name: node-memory-hog
-      app.kubernetes.io/part-of: litmus
-      app.kubernetes.io/component: experiment-job
-      app.kubernetes.io/version: 3.13.0
-
----
----
-apiVersion: litmuschaos.io/v1alpha1
-description:
-  message: |
-    Give a CPU spike on a node belonging to a deployment
-kind: ChaosExperiment
-metadata:
-  name: node-cpu-hog
-  labels:
-    name: node-cpu-hog
-    app.kubernetes.io/part-of: litmus
-    app.kubernetes.io/component: chaosexperiment
-    app.kubernetes.io/version: 3.13.0
-spec:
-  definition:
-    scope: Cluster
-    permissions:
-      # Create and monitor the experiment & helper pods
-      - apiGroups: [""]
-        resources: ["pods"]
-        verbs:
-          [
-            "create",
-            "delete",
-            "get",
-            "list",
-            "patch",
-            "update",
-            "deletecollection",
-          ]
-      # Performs CRUD operations on the events inside chaosengine and chaosresult
-      - apiGroups: [""]
-        resources: ["events"]
-        verbs: ["create", "get", "list", "patch", "update"]
-      # Fetch configmaps details and mount it to the experiment pod (if specified)
-      - apiGroups: [""]
-        resources: ["configmaps"]
-        verbs: ["get", "list"]
-      # Track and get the runner, experiment, and helper pods log
-      - apiGroups: [""]
-        resources: ["pods/log"]
-        verbs: ["get", "list", "watch"]
-      # for creating and managing to execute commands inside target container
-      - apiGroups: [""]
-        resources: ["pods/exec"]
-        verbs: ["get", "list", "create"]
-      # for configuring and monitor the experiment job by the chaos-runner pod
-      - apiGroups: ["batch"]
-        resources: ["jobs"]
-        verbs: ["create", "list", "get", "delete", "deletecollection"]
-      # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
-      - apiGroups: ["litmuschaos.io"]
-        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
-        verbs: ["create", "list", "get", "patch", "update", "delete"]
-      # for experiment to perform node status checks
-      - apiGroups: [""]
-        resources: ["nodes"]
-        verbs: ["get", "list"]
-    image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
-    imagePullPolicy: Always
-    args:
-      - -c
-      - ./experiments -name node-cpu-hog
-    command:
-      - /bin/bash
-    env:
-      - name: TOTAL_CHAOS_DURATION
-        value: "60"
-
-      # Period to wait before and after injection of chaos in sec
-      - name: RAMP_TIME
-        value: ""
+      - name: DEFAULT_HEALTH_CHECK
+        value: "false"
 
-      ## ENTER THE NUMBER OF CORES OF CPU FOR CPU HOGGING
-      ## OPTIONAL VALUE IN CASE OF EMPTY VALUE IT WILL TAKE NODE CPU CAPACITY
-      - name: NODE_CPU_CORE
+      - name: TARGET_CONTAINER
         value: ""
 
-      ## LOAD CPU WITH GIVEN PERCENT LOADING FOR THE CPU STRESS WORKERS.
-      ## 0 IS EFFECTIVELY A SLEEP (NO LOAD) AND 100 IS FULL LOADING
-      - name: CPU_LOAD
-        value: "100"
-
-      # ENTER THE COMMA SEPARATED TARGET NODES NAME
-      - name: TARGET_NODES
+      - name: TARGET_PODS
         value: ""
 
+      # To select pods on specific node(s)
       - name: NODE_LABEL
         value: ""
 
-      # provide lib image
-      - name: LIB_IMAGE
-        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
-
-      ## percentage of total nodes to target
-      - name: NODES_AFFECTED_PERC
-        value: ""
-
-      - name: DEFAULT_HEALTH_CHECK
-        value: "false"
-
-      ## it defines the sequence of chaos execution for multiple target nodes
-      ## supported values: serial, parallel
-      - name: SEQUENCE
-        value: "parallel"
-
     labels:
-      name: node-cpu-hog
+      name: pod-memory-hog
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
+      app.kubernetes.io/runtime-api-usage: "true"
       app.kubernetes.io/version: 3.13.0
 
 ---
@@ -357,12 +275,12 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Injects memory consumption on pods belonging to an app deployment
+    It injects the chaos inside the pod which modifies the body of the response from the provided application server to the body string provided by the user and reverts after a specified duration
 kind: ChaosExperiment
 metadata:
-  name: pod-memory-hog-exec
+  name: pod-http-modify-body
   labels:
-    name: pod-memory-hog-exec
+    name: pod-http-modify-body
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -427,45 +345,88 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name pod-memory-hog-exec
+      - ./experiments -name pod-http-modify-body
     command:
       - /bin/bash
     env:
-      - name: TOTAL_CHAOS_DURATION
-        value: "60"
+      - name: TARGET_CONTAINER
+        value: ""
 
-      ## enter the amount of memory in megabytes to be consumed by the application pod
-      - name: MEMORY_CONSUMPTION
-        value: "500"
+      # provide lib image
+      - name: LIB_IMAGE
+        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
 
-      ## percentage of total pods to target
-      - name: PODS_AFFECTED_PERC
+      # provide the body string to overwrite the response body
+      # if no value is provided, response will be an empty body.
+      - name: RESPONSE_BODY
         value: ""
 
-      ## Period to wait before and after injection of chaos in sec
-      - name: RAMP_TIME
+      # provide the encoding type for the response body
+      # currently supported value are gzip, deflate
+      # if empty no encoding will be applied
+      - name: CONTENT_ENCODING
         value: ""
 
-      # The command to kill the chaos process
-      - name: CHAOS_KILL_COMMAND
-        value: "kill $(find /proc -name exe -lname '*/dd' 2>&1 | grep -v 'Permission denied' | awk -F/ '{print $(NF-1)}' | head -n 1)"
+      # provide the content type for the response body
+      - name: CONTENT_TYPE
+        value: "text/plain"
 
-      ## it defines the sequence of chaos execution for multiple target pods
-      ## supported values: serial, parallel
-      - name: SEQUENCE
-        value: "parallel"
+      # port of the target service
+      - name: TARGET_SERVICE_PORT
+        value: "80"
+
+      # toxicity is the probability of the request to be affected
+      # provide the percentage value in the range of 0-100
+      # 0 means no request will be affected and 100 means all request will be affected
+      - name: TOXICITY
+        value: "100"
+
+      # port on which the proxy will listen
+      - name: PROXY_PORT
+        value: "20000"
+
+      # network interface on which the proxy will listen
+      - name: NETWORK_INTERFACE
+        value: "eth0"
+
+      - name: TOTAL_CHAOS_DURATION
+        value: "60" # in seconds
+
+      # Time period to wait before and after injection of chaos in sec
+      - name: RAMP_TIME
+        value: ""
 
       - name: DEFAULT_HEALTH_CHECK
         value: "false"
 
-      - name: TARGET_CONTAINER
+      # percentage of total pods to target
+      - name: PODS_AFFECTED_PERC
         value: ""
 
       - name: TARGET_PODS
         value: ""
 
+      # provide the name of container runtime
+      # for litmus LIB, it supports docker, containerd, crio
+      # for pumba LIB, it supports docker only
+      - name: CONTAINER_RUNTIME
+        value: "containerd"
+
+      # provide the socket file path
+      - name: SOCKET_PATH
+        value: "/run/containerd/containerd.sock"
+
+      # To select pods on specific node(s)
+      - name: NODE_LABEL
+        value: ""
+
+      ## it defines the sequence of chaos execution for multiple target pods
+      ## supported values: serial, parallel
+      - name: SEQUENCE
+        value: "parallel"
+
     labels:
-      name: pod-memory-hog-exec
+      name: pod-http-modify-body
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
@@ -475,18 +436,18 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Injects HTTP request latency on pods belonging to an app deployment
+    Kills the Docker service on the application node to check the resiliency.
 kind: ChaosExperiment
 metadata:
-  name: pod-http-latency
+  name: docker-service-kill
   labels:
-    name: pod-http-latency
+    name: docker-service-kill
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
 spec:
   definition:
-    scope: Namespaced
+    scope: Cluster
     permissions:
       # Create and monitor the experiment & helper pods
       - apiGroups: [""]
@@ -517,22 +478,6 @@ spec:
       - apiGroups: [""]
         resources: ["pods/exec"]
         verbs: ["get", "list", "create"]
-      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
-      - apiGroups: ["apps"]
-        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
-        verbs: ["list", "get"]
-      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
-      - apiGroups: ["apps.openshift.io"]
-        resources: ["deploymentconfigs"]
-        verbs: ["list", "get"]
-      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
-      - apiGroups: [""]
-        resources: ["replicationcontrollers"]
-        verbs: ["get", "list"]
-      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
-      - apiGroups: ["argoproj.io"]
-        resources: ["rollouts"]
-        verbs: ["list", "get"]
       # for configuring and monitor the experiment job by the chaos-runner pod
       - apiGroups: ["batch"]
         resources: ["jobs"]
@@ -541,83 +486,139 @@ spec:
       - apiGroups: ["litmuschaos.io"]
         resources: ["chaosengines", "chaosexperiments", "chaosresults"]
         verbs: ["create", "list", "get", "patch", "update", "delete"]
+      # for experiment to perform node status checks
+      - apiGroups: [""]
+        resources: ["nodes"]
+        verbs: ["get", "list"]
     image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name pod-http-latency
+      - ./experiments -name docker-service-kill
     command:
       - /bin/bash
     env:
-      - name: TARGET_CONTAINER
+      - name: TOTAL_CHAOS_DURATION
+        value: "90" # in seconds
+
+      # Period to wait before injection of chaos in sec
+      - name: RAMP_TIME
+        value: ""
+
+      - name: NODE_LABEL
         value: ""
 
       # provide lib image
       - name: LIB_IMAGE
-        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
-
-      - name: LATENCY
-        value: "2000" #in ms
-
-      # port of the target service
-      - name: TARGET_SERVICE_PORT
-        value: "80"
+        value: "ubuntu:16.04"
 
-      # toxicity is the probability of the request to be affected
-      # provide the percentage value in the range of 0-100
-      # 0 means no request will be affected and 100 means all request will be affected
-      - name: TOXICITY
-        value: "100"
+      # provide the target node name
+      - name: TARGET_NODE
+        value: ""
 
-      # port on which the proxy will listen
-      - name: PROXY_PORT
-        value: "20000"
+      - name: DEFAULT_HEALTH_CHECK
+        value: "false"
 
-      # network interface on which the proxy will listen
-      - name: NETWORK_INTERFACE
-        value: "eth0"
+    labels:
+      name: docker-service-kill
+      app.kubernetes.io/part-of: litmus
+      app.kubernetes.io/component: experiment-job
+      app.kubernetes.io/service-kill: "true"
+      app.kubernetes.io/version: 3.13.0
 
+---
+---
+apiVersion: litmuschaos.io/v1alpha1
+description:
+  message: |
+    Kills the Kubelet service on the application node to check the resiliency.
+kind: ChaosExperiment
+metadata:
+  name: kubelet-service-kill
+  labels:
+    name: kubelet-service-kill
+    app.kubernetes.io/part-of: litmus
+    app.kubernetes.io/component: chaosexperiment
+    app.kubernetes.io/version: 3.13.0
+spec:
+  definition:
+    scope: Cluster
+    permissions:
+      # Create and monitor the experiment & helper pods
+      - apiGroups: [""]
+        resources: ["pods"]
+        verbs:
+          [
+            "create",
+            "delete",
+            "get",
+            "list",
+            "patch",
+            "update",
+            "deletecollection",
+          ]
+      # Performs CRUD operations on the events inside chaosengine and chaosresult
+      - apiGroups: [""]
+        resources: ["events"]
+        verbs: ["create", "get", "list", "patch", "update"]
+      # Fetch configmaps details and mount it to the experiment pod (if specified)
+      - apiGroups: [""]
+        resources: ["configmaps"]
+        verbs: ["get", "list"]
+      # Track and get the runner, experiment, and helper pods log
+      - apiGroups: [""]
+        resources: ["pods/log"]
+        verbs: ["get", "list", "watch"]
+      # for creating and managing to execute commands inside target container
+      - apiGroups: [""]
+        resources: ["pods/exec"]
+        verbs: ["get", "list", "create"]
+      # for configuring and monitor the experiment job by the chaos-runner pod
+      - apiGroups: ["batch"]
+        resources: ["jobs"]
+        verbs: ["create", "list", "get", "delete", "deletecollection"]
+      # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
+      - apiGroups: ["litmuschaos.io"]
+        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
+        verbs: ["create", "list", "get", "patch", "update", "delete"]
+      # for experiment to perform node status checks
+      - apiGroups: [""]
+        resources: ["nodes"]
+        verbs: ["get", "list"]
+    image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
+    imagePullPolicy: Always
+    args:
+      - -c
+      - ./experiments -name kubelet-service-kill
+    command:
+      - /bin/bash
+    env:
       - name: TOTAL_CHAOS_DURATION
         value: "60" # in seconds
 
-      # Time period to wait before and after injection of chaos in sec
+      # Period to wait before and after injection of chaos in sec
       - name: RAMP_TIME
         value: ""
 
-      - name: DEFAULT_HEALTH_CHECK
-        value: "false"
-
-      # percentage of total pods to target
-      - name: PODS_AFFECTED_PERC
-        value: ""
-
-      - name: TARGET_PODS
+      - name: NODE_LABEL
         value: ""
 
-      # provide the name of container runtime
-      # for litmus LIB, it supports docker, containerd, crio
-      # for pumba LIB, it supports docker only
-      - name: CONTAINER_RUNTIME
-        value: "containerd"
-
-      # provide the socket file path
-      - name: SOCKET_PATH
-        value: "/run/containerd/containerd.sock"
+      # provide lib image
+      - name: LIB_IMAGE
+        value: "ubuntu:16.04"
 
-      # To select pods on specific node(s)
-      - name: NODE_LABEL
+      # provide the target node name
+      - name: TARGET_NODE
         value: ""
 
-      ## it defines the sequence of chaos execution for multiple target pods
-      ## supported values: serial, parallel
-      - name: SEQUENCE
-        value: "parallel"
+      - name: DEFAULT_HEALTH_CHECK
+        value: "false"
 
     labels:
-      name: pod-http-latency
+      name: kubelet-service-kill
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
-      app.kubernetes.io/runtime-api-usage: "true"
+      app.kubernetes.io/service-kill: "true"
       app.kubernetes.io/version: 3.13.0
 
 ---
@@ -625,18 +626,18 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    IO stress on a app pods belonging to an app deployment
+    Scale the application replicas and test the node autoscaling on cluster
 kind: ChaosExperiment
 metadata:
-  name: pod-io-stress
+  name: pod-autoscaler
   labels:
-    name: pod-io-stress
+    name: pod-autoscaler
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
 spec:
   definition:
-    scope: Namespaced
+    scope: Cluster
     permissions:
       # Create and monitor the experiment & helper pods
       - apiGroups: [""]
@@ -667,22 +668,10 @@ spec:
       - apiGroups: [""]
         resources: ["pods/exec"]
         verbs: ["get", "list", "create"]
-      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
+      # performs CRUD operations on the deployments and statefulsets
       - apiGroups: ["apps"]
-        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
-        verbs: ["list", "get"]
-      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
-      - apiGroups: ["apps.openshift.io"]
-        resources: ["deploymentconfigs"]
-        verbs: ["list", "get"]
-      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
-      - apiGroups: [""]
-        resources: ["replicationcontrollers"]
-        verbs: ["get", "list"]
-      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
-      - apiGroups: ["argoproj.io"]
-        resources: ["rollouts"]
-        verbs: ["list", "get"]
+        resources: ["deployments", "statefulsets"]
+        verbs: ["list", "get", "patch", "update"]
       # for configuring and monitor the experiment job by the chaos-runner pod
       - apiGroups: ["batch"]
         resources: ["jobs"]
@@ -695,87 +684,41 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name pod-io-stress
+      - ./experiments -name pod-autoscaler
     command:
       - /bin/bash
     env:
       - name: TOTAL_CHAOS_DURATION
-        value: "120"
-
-      ## specify the size as percentage of free space on the file system
-      ## default value 90 (in percentage)
-      - name: FILESYSTEM_UTILIZATION_PERCENTAGE
-        value: "10"
-
-      ## we can specify the size in Gigabyte (Gb) also in place of percentage of free space
-      ## NOTE: for selecting this option FILESYSTEM_UTILIZATION_PERCENTAGE should be empty
-      - name: FILESYSTEM_UTILIZATION_BYTES
-        value: ""
-
-      ## Total number of workers default value is 4
-      - name: NUMBER_OF_WORKERS
-        value: "4"
-
-      ## Percentage of total pods to target
-      - name: PODS_AFFECTED_PERC
-        value: ""
-
-      # provide volume mount path
-      - name: VOLUME_MOUNT_PATH
-        value: ""
-
-      - name: TARGET_CONTAINER
-        value: ""
-
-      ## specify the comma separated target pods
-      - name: TARGET_PODS
-        value: ""
-
-      # To select pods on specific node(s)
-      - name: NODE_LABEL
-        value: ""
+        value: "60"
 
       # Period to wait before and after injection of chaos in sec
       - name: RAMP_TIME
         value: ""
 
-      # provide lib image
-      - name: LIB_IMAGE
-        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
+      # Number of replicas to scale
+      - name: REPLICA_COUNT
+        value: "5"
 
       - name: DEFAULT_HEALTH_CHECK
         value: "false"
 
-      ## provide the cluster runtime
-      - name: CONTAINER_RUNTIME
-        value: "containerd"
-
-      # provide the socket file path
-      - name: SOCKET_PATH
-        value: "/run/containerd/containerd.sock"
-
-      ## it defines the sequence of chaos execution for multiple target pods
-      ## supported values: serial, parallel
-      - name: SEQUENCE
-        value: "parallel"
-
     labels:
-      name: pod-io-stress
+      name: pod-autoscaler
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
-      app.kubernetes.io/runtime-api-usage: "true"
       app.kubernetes.io/version: 3.13.0
 
 ---
 ---
 apiVersion: litmuschaos.io/v1alpha1
 description:
-  message: "Kills a container belonging to an application pod \n"
+  message: |
+    Injects network packet duplication on pods belonging to an app deployment
 kind: ChaosExperiment
 metadata:
-  name: container-kill
+  name: pod-network-duplication
   labels:
-    name: container-kill
+    name: pod-network-duplication
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -840,53 +783,64 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name container-kill
+      - ./experiments -name pod-network-duplication
     command:
       - /bin/bash
     env:
-      - name: TARGET_CONTAINER
-        value: ""
+      - name: TOTAL_CHAOS_DURATION
+        value: "60"
 
-      # Period to wait before and after injection of chaos in sec
       - name: RAMP_TIME
         value: ""
 
-      - name: TARGET_PODS
+      - name: TARGET_CONTAINER
         value: ""
 
-      # provide the chaos interval
-      - name: CHAOS_INTERVAL
-        value: "10"
+      - name: TC_IMAGE
+        value: "gaiadocker/iproute2"
 
-      - name: SIGNAL
-        value: "SIGKILL"
+      - name: NETWORK_INTERFACE
+        value: "eth0"
 
-      # provide the socket file path
-      - name: SOCKET_PATH
-        value: "/run/containerd/containerd.sock"
+      - name: NETWORK_PACKET_DUPLICATION_PERCENTAGE
+        value: "100" # in percentage
 
-      # provide the name of container runtime
-      # it supports docker, containerd, crio
-      - name: CONTAINER_RUNTIME
-        value: "containerd"
+      - name: TARGET_PODS
+        value: ""
 
-      # provide the total chaos duration
-      - name: TOTAL_CHAOS_DURATION
-        value: "20"
+      # To select pods on specific node(s)
+      - name: NODE_LABEL
+        value: ""
 
       ## percentage of total pods to target
       - name: PODS_AFFECTED_PERC
         value: ""
 
-      # To select pods on specific node(s)
-      - name: NODE_LABEL
-        value: ""
+      - name: LIB_IMAGE
+        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
+
+      # provide the name of container runtime
+      # for litmus LIB, it supports docker, containerd, crio
+      # for pumba LIB, it supports docker only
+      - name: CONTAINER_RUNTIME
+        value: "containerd"
 
       - name: DEFAULT_HEALTH_CHECK
         value: "false"
 
-      - name: LIB_IMAGE
-        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
+      # provide the destination ips
+      # chaos injection will be triggered for these destination ips
+      - name: DESTINATION_IPS
+        value: ""
+
+      # provide the destination hosts
+      # chaos injection will be triggered for these destination hosts
+      - name: DESTINATION_HOSTS
+        value: ""
+
+      # provide the socket file path
+      - name: SOCKET_PATH
+        value: "/run/containerd/containerd.sock"
 
       ## it defines the sequence of chaos execution for multiple target pods
       ## supported values: serial, parallel
@@ -894,7 +848,7 @@ spec:
         value: "parallel"
 
     labels:
-      name: container-kill
+      name: pod-network-duplication
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/runtime-api-usage: "true"
@@ -905,18 +859,18 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    It injects the chaos inside the pod which modifies the header of the request/response from the provided application server to the headers provided by the user and reverts after a specified duration
+    Give a CPU spike on a node belonging to a deployment
 kind: ChaosExperiment
 metadata:
-  name: pod-http-modify-header
+  name: node-cpu-hog
   labels:
-    name: pod-http-modify-header
+    name: node-cpu-hog
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
 spec:
   definition:
-    scope: Namespaced
+    scope: Cluster
     permissions:
       # Create and monitor the experiment & helper pods
       - apiGroups: [""]
@@ -947,22 +901,6 @@ spec:
       - apiGroups: [""]
         resources: ["pods/exec"]
         verbs: ["get", "list", "create"]
-      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
-      - apiGroups: ["apps"]
-        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
-        verbs: ["list", "get"]
-      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
-      - apiGroups: ["apps.openshift.io"]
-        resources: ["deploymentconfigs"]
-        verbs: ["list", "get"]
-      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
-      - apiGroups: [""]
-        resources: ["replicationcontrollers"]
-        verbs: ["get", "list"]
-      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
-      - apiGroups: ["argoproj.io"]
-        resources: ["rollouts"]
-        verbs: ["list", "get"]
       # for configuring and monitor the experiment job by the chaos-runner pod
       - apiGroups: ["batch"]
         resources: ["jobs"]
@@ -971,89 +909,62 @@ spec:
       - apiGroups: ["litmuschaos.io"]
         resources: ["chaosengines", "chaosexperiments", "chaosresults"]
         verbs: ["create", "list", "get", "patch", "update", "delete"]
+      # for experiment to perform node status checks
+      - apiGroups: [""]
+        resources: ["nodes"]
+        verbs: ["get", "list"]
     image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name pod-http-modify-header
+      - ./experiments -name node-cpu-hog
     command:
       - /bin/bash
     env:
-      - name: TARGET_CONTAINER
-        value: ""
-
-      # provide lib image
-      - name: LIB_IMAGE
-        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
-
-      # map of headers to modify/add; Eg: {"X-Litmus-Test-Header": "X-Litmus-Test-Value"}
-      # to remove a header, just set the value to ""; Eg: {"X-Litmus-Test-Header": ""}
-      - name: HEADERS_MAP
-        value: "{}"
-
-      # whether to modify response headers or request headers. Accepted values: request, response
-      - name: HEADER_MODE
-        value: "response"
-
-      # port of the target service
-      - name: TARGET_SERVICE_PORT
-        value: "80"
-
-      # toxicity is the probability of the request to be affected
-      # provide the percentage value in the range of 0-100
-      # 0 means no request will be affected and 100 means all request will be affected
-      - name: TOXICITY
-        value: "100"
-
-      # port on which the proxy will listen
-      - name: PROXY_PORT
-        value: "20000"
-
-      # network interface on which the proxy will listen
-      - name: NETWORK_INTERFACE
-        value: "eth0"
-
       - name: TOTAL_CHAOS_DURATION
-        value: "60" # in seconds
+        value: "60"
 
-      # Time period to wait before and after injection of chaos in sec
+      # Period to wait before and after injection of chaos in sec
       - name: RAMP_TIME
         value: ""
 
-      - name: DEFAULT_HEALTH_CHECK
-        value: "false"
-
-      # percentage of total pods to target
-      - name: PODS_AFFECTED_PERC
+      ## ENTER THE NUMBER OF CORES OF CPU FOR CPU HOGGING
+      ## OPTIONAL VALUE IN CASE OF EMPTY VALUE IT WILL TAKE NODE CPU CAPACITY
+      - name: NODE_CPU_CORE
         value: ""
 
-      - name: TARGET_PODS
+      ## LOAD CPU WITH GIVEN PERCENT LOADING FOR THE CPU STRESS WORKERS.
+      ## 0 IS EFFECTIVELY A SLEEP (NO LOAD) AND 100 IS FULL LOADING
+      - name: CPU_LOAD
+        value: "100"
+
+      # ENTER THE COMMA SEPARATED TARGET NODE NAMES
+      - name: TARGET_NODES
         value: ""
 
-      # provide the name of container runtime
-      # for litmus LIB, it supports docker, containerd, crio
-      # for pumba LIB, it supports docker only
-      - name: CONTAINER_RUNTIME
-        value: "containerd"
+      - name: NODE_LABEL
+        value: ""
 
-      # provide the socket file path
-      - name: SOCKET_PATH
-        value: "/run/containerd/containerd.sock"
+      # provide lib image
+      - name: LIB_IMAGE
+        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
 
-      # To select pods on specific node(s)
-      - name: NODE_LABEL
+      ## percentage of total nodes to target
+      - name: NODES_AFFECTED_PERC
         value: ""
 
-      ## it defines the sequence of chaos execution for multiple target pods
+      - name: DEFAULT_HEALTH_CHECK
+        value: "false"
+
+      ## it defines the sequence of chaos execution for multiple target nodes
       ## supported values: serial, parallel
       - name: SEQUENCE
         value: "parallel"
 
     labels:
-      name: pod-http-modify-header
+      name: node-cpu-hog
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
-      app.kubernetes.io/runtime-api-usage: "true"
       app.kubernetes.io/version: 3.13.0
 
 ---
@@ -1061,12 +972,12 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Kills the Kubelet service on the application node to check the resiliency.
+    Poweroff a node in the cluster
 kind: ChaosExperiment
 metadata:
-  name: kubelet-service-kill
+  name: node-poweroff
   labels:
-    name: kubelet-service-kill
+    name: node-poweroff
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -1091,15 +1002,15 @@ spec:
       - apiGroups: [""]
         resources: ["events"]
         verbs: ["create", "get", "list", "patch", "update"]
-      # Fetch configmaps details and mount it to the experiment pod (if specified)
+      # Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
       - apiGroups: [""]
-        resources: ["configmaps"]
+        resources: ["configmaps", "secrets"]
         verbs: ["get", "list"]
       # Track and get the runner, experiment, and helper pods log
       - apiGroups: [""]
         resources: ["pods/log"]
         verbs: ["get", "list", "watch"]
-      # for creating and managing to execute commands inside target container
+      # for creating and managing to execute commands inside target container
       - apiGroups: [""]
         resources: ["pods/exec"]
         verbs: ["get", "list", "create"]
@@ -1119,49 +1030,58 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name kubelet-service-kill
+      - ./experiments -name node-restart
     command:
       - /bin/bash
     env:
+      - name: SSH_USER
+        value: "root"
+
       - name: TOTAL_CHAOS_DURATION
-        value: "60" # in seconds
+        value: "60"
+
+      - name: REBOOT_COMMAND
+        value: '-o ServerAliveInterval=1 -o ServerAliveCountMax=1 "sudo systemctl poweroff --force --force" ; true'
 
       # Period to wait before and after injection of chaos in sec
       - name: RAMP_TIME
         value: ""
 
-      - name: NODE_LABEL
-        value: ""
-
       # provide lib image
       - name: LIB_IMAGE
-        value: "ubuntu:16.04"
+        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
 
-      # provide the target node name
+      # ENTER THE TARGET NODE NAME
       - name: TARGET_NODE
         value: ""
 
-      - name: DEFAULT_HEALTH_CHECK
-        value: "false"
+      - name: NODE_LABEL
+        value: ""
+
+      # ENTER THE TARGET NODE IP
+      - name: TARGET_NODE_IP
+        value: ""
 
     labels:
-      name: kubelet-service-kill
+      name: node-poweroff
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
-      app.kubernetes.io/service-kill: "true"
       app.kubernetes.io/version: 3.13.0
+    secrets:
+      - name: id-rsa
+        mountPath: /mnt/
 
 ---
 ---
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Injects network latency on pods belonging to an app deployment
+    Injects memory consumption on pods belonging to an app deployment
 kind: ChaosExperiment
 metadata:
-  name: pod-network-latency
+  name: pod-memory-hog-exec
   labels:
-    name: pod-network-latency
+    name: pod-memory-hog-exec
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -1226,80 +1146,140 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name pod-network-latency
+      - ./experiments -name pod-memory-hog-exec
     command:
       - /bin/bash
     env:
-      - name: TARGET_CONTAINER
-        value: ""
-
-      - name: NETWORK_INTERFACE
-        value: "eth0"
-
-      # provide lib image
-      - name: LIB_IMAGE
-        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
-
-      - name: TC_IMAGE
-        value: "gaiadocker/iproute2"
-
-      - name: NETWORK_LATENCY
-        value: "2000" #in ms
-
       - name: TOTAL_CHAOS_DURATION
-        value: "60" # in seconds
-
-      # Time period to wait before and after injection of chaos in sec
-      - name: RAMP_TIME
-        value: ""
+        value: "60"
 
-      - name: JITTER
-        value: "0"
+      ## enter the amount of memory in megabytes to be consumed by the application pod
+      - name: MEMORY_CONSUMPTION
+        value: "500"
 
       ## percentage of total pods to target
       - name: PODS_AFFECTED_PERC
         value: ""
 
-      - name: TARGET_PODS
+      ## Period to wait before and after injection of chaos in sec
+      - name: RAMP_TIME
         value: ""
 
-      # provide the name of container runtime
-      # for litmus LIB, it supports docker, containerd, crio
-      # for pumba LIB, it supports docker only
-      - name: CONTAINER_RUNTIME
-        value: "containerd"
+      # The command to kill the chaos process
+      - name: CHAOS_KILL_COMMAND
+        value: "kill $(find /proc -name exe -lname '*/dd' 2>&1 | grep -v 'Permission denied' | awk -F/ '{print $(NF-1)}' | head -n 1)"
+
+      ## it defines the sequence of chaos execution for multiple target pods
+      ## supported values: serial, parallel
+      - name: SEQUENCE
+        value: "parallel"
 
       - name: DEFAULT_HEALTH_CHECK
         value: "false"
 
-      # provide the destination ips
-      # chaos injection will be triggered for these destination ips
-      - name: DESTINATION_IPS
+      - name: TARGET_CONTAINER
+        value: ""
+
+      - name: TARGET_PODS
+        value: ""
+
+    labels:
+      name: pod-memory-hog-exec
+      app.kubernetes.io/part-of: litmus
+      app.kubernetes.io/component: experiment-job
+      app.kubernetes.io/version: 3.13.0
+
+---
+---
+apiVersion: litmuschaos.io/v1alpha1
+description:
+  message: |
+    Drain the node where application pod is scheduled
+kind: ChaosExperiment
+metadata:
+  name: node-drain
+  labels:
+    name: node-drain
+    app.kubernetes.io/part-of: litmus
+    app.kubernetes.io/component: chaosexperiment
+    app.kubernetes.io/version: 3.13.0
+spec:
+  definition:
+    scope: Cluster
+    permissions:
+      # Create and monitor the experiment & helper pods
+      - apiGroups: [""]
+        resources: ["pods"]
+        verbs:
+          [
+            "create",
+            "delete",
+            "get",
+            "list",
+            "patch",
+            "update",
+            "deletecollection",
+          ]
+      # Performs CRUD operations on the events inside chaosengine and chaosresult
+      - apiGroups: [""]
+        resources: ["events"]
+        verbs: ["create", "get", "list", "patch", "update"]
+      # Fetch configmaps details and mount it to the experiment pod (if specified)
+      - apiGroups: [""]
+        resources: ["configmaps"]
+        verbs: ["get", "list"]
+      # Track and get the runner, experiment, and helper pods log
+      - apiGroups: [""]
+        resources: ["pods/log"]
+        verbs: ["get", "list", "watch"]
+      # for creating and managing to execute commands inside target container
+      - apiGroups: [""]
+        resources: ["pods/exec", "pods/eviction"]
+        verbs: ["get", "list", "create"]
+      # ignore daemonsets while draining the node
+      - apiGroups: ["apps"]
+        resources: ["daemonsets"]
+        verbs: ["list", "get", "delete"]
+      # for configuring and monitor the experiment job by the chaos-runner pod
+      - apiGroups: ["batch"]
+        resources: ["jobs"]
+        verbs: ["create", "list", "get", "delete", "deletecollection"]
+      # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
+      - apiGroups: ["litmuschaos.io"]
+        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
+        verbs: ["create", "list", "get", "patch", "update", "delete"]
+      # for experiment to perform node status checks
+      - apiGroups: [""]
+        resources: ["nodes"]
+        verbs: ["get", "list", "patch"]
+    image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
+    imagePullPolicy: Always
+    args:
+      - -c
+      - ./experiments -name node-drain
+    command:
+      - /bin/bash
+    env:
+      - name: TARGET_NODE
         value: ""
 
-      # provide the destination hosts
-      # chaos injection will be triggered for these destination hosts
-      - name: DESTINATION_HOSTS
+      - name: NODE_LABEL
         value: ""
 
-      # provide the socket file path
-      - name: SOCKET_PATH
-        value: "/run/containerd/containerd.sock"
+      - name: TOTAL_CHAOS_DURATION
+        value: "60"
 
-      # To select pods on specific node(s)
-      - name: NODE_LABEL
+      # Period to wait before and after injection of chaos in sec
+      - name: RAMP_TIME
         value: ""
 
-      ## it defines the sequence of chaos execution for multiple target pods
-      ## supported values: serial, parallel
-      - name: SEQUENCE
-        value: "parallel"
+      - name: DEFAULT_HEALTH_CHECK
+        value: "false"
 
     labels:
-      name: pod-network-latency
+      name: node-drain
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
-      app.kubernetes.io/runtime-api-usage: "true"
       app.kubernetes.io/version: 3.13.0
 
 ---
@@ -1307,12 +1287,12 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Inject network packet corruption into application pod
+    Pod DNS Error injects DNS failure/error in target pod containers
 kind: ChaosExperiment
 metadata:
-  name: pod-network-corruption
+  name: pod-dns-error
   labels:
-    name: pod-network-corruption
+    name: pod-dns-error
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -1374,10 +1354,9 @@ spec:
         resources: ["chaosengines", "chaosexperiments", "chaosresults"]
         verbs: ["create", "list", "get", "patch", "update", "delete"]
     image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
-    imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name pod-network-corruption
+      - ./experiments -name pod-dns-error
     command:
       - /bin/bash
     env:
@@ -1388,14 +1367,8 @@ spec:
       - name: LIB_IMAGE
         value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
 
-      - name: NETWORK_INTERFACE
-        value: "eth0"
-
-      - name: TC_IMAGE
-        value: "gaiadocker/iproute2"
-
-      - name: NETWORK_PACKET_CORRUPTION_PERCENTAGE
-        value: "100" #in PERCENTAGE
+      - name: DEFAULT_HEALTH_CHECK
+        value: "false"
 
       - name: TOTAL_CHAOS_DURATION
         value: "60" # in seconds
@@ -1411,40 +1384,29 @@ spec:
       - name: TARGET_PODS
         value: ""
 
-      # To select pods on specific node(s)
-      - name: NODE_LABEL
-        value: ""
-
-      # provide the name of container runtime
-      # for litmus LIB, it supports docker, containerd, crio
-      # for pumba LIB, it supports docker only
+      # provide the name of container runtime, it supports docker, containerd, crio
       - name: CONTAINER_RUNTIME
         value: "containerd"
 
-      # provide the destination ips
-      # chaos injection will be triggered for these destination ips
-      - name: DESTINATION_IPS
-        value: ""
-
-      # provide the destination hosts
-      # chaos injection will be triggered for these destination hosts
-      - name: DESTINATION_HOSTS
-        value: ""
-
       # provide the socket file path
       - name: SOCKET_PATH
         value: "/run/containerd/containerd.sock"
 
-      - name: DEFAULT_HEALTH_CHECK
-        value: "false"
-
       ## it defines the sequence of chaos execution for multiple target pods
       ## supported values: serial, parallel
       - name: SEQUENCE
         value: "parallel"
 
+      # list of the target hostnames or keywords e.g. '["litmuschaos","chaosnative.io"]'. If empty all hostnames are targets
+      - name: TARGET_HOSTNAMES
+        value: ""
+
+      # can be either exact or substring, determines whether the DNS query has to match exactly with one of the targets or can have any of the targets as substring
+      - name: MATCH_SCHEME
+        value: "exact"
+
     labels:
-      name: pod-network-corruption
+      experiment: pod-dns-error
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/runtime-api-usage: "true"
@@ -1455,12 +1417,12 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Injects network packet duplication on pods belonging to an app deployment
+    Fillup Ephemeral Storage of a Resource
 kind: ChaosExperiment
 metadata:
-  name: pod-network-duplication
+  name: disk-fill
   labels:
-    name: pod-network-duplication
+    name: disk-fill
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -1525,31 +1487,34 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name pod-network-duplication
+      - ./experiments -name disk-fill
     command:
       - /bin/bash
     env:
+      - name: TARGET_CONTAINER
+        value: ""
+
+      - name: FILL_PERCENTAGE
+        value: "80"
+
       - name: TOTAL_CHAOS_DURATION
         value: "60"
 
+      # Period to wait before and after injection of chaos in sec
       - name: RAMP_TIME
         value: ""
 
-      - name: TARGET_CONTAINER
-        value: ""
-
-      - name: TC_IMAGE
-        value: "gaiadocker/iproute2"
-
-      - name: NETWORK_INTERFACE
-        value: "eth0"
-
-      - name: NETWORK_PACKET_DUPLICATION_PERCENTAGE
-        value: "100" # in percentage
+      # provide the data block size
+      # supported unit is KB
+      - name: DATA_BLOCK_SIZE
+        value: "256"
 
       - name: TARGET_PODS
         value: ""
 
+      - name: EPHEMERAL_STORAGE_MEBIBYTES
+        value: ""
+
       # To select pods on specific node(s)
       - name: NODE_LABEL
         value: ""
@@ -1558,42 +1523,31 @@ spec:
       - name: PODS_AFFECTED_PERC
         value: ""
 
-      - name: LIB_IMAGE
-        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
-
-      # provide the name of container runtime
-      # for litmus LIB, it supports docker, containerd, crio
-      # for pumba LIB, it supports docker only
-      - name: CONTAINER_RUNTIME
-        value: "containerd"
-
       - name: DEFAULT_HEALTH_CHECK
         value: "false"
 
-      # provide the destination ips
-      # chaos injection will be triggered for these destination ips
-      - name: DESTINATION_IPS
-        value: ""
-
-      # provide the destination hosts
-      # chaos injection will be triggered for these destination hosts
-      - name: DESTINATION_HOSTS
-        value: ""
+      - name: LIB_IMAGE
+        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
 
       # provide the socket file path
       - name: SOCKET_PATH
         value: "/run/containerd/containerd.sock"
 
+      # provide the name of container runtime
+      # it supports docker, containerd, crio
+      - name: CONTAINER_RUNTIME
+        value: "containerd"
+
       ## it defines the sequence of chaos execution for multiple target pods
       ## supported values: serial, parallel
       - name: SEQUENCE
         value: "parallel"
 
     labels:
-      name: pod-network-duplication
+      name: disk-fill
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
-      app.kubernetes.io/runtime-api-usage: "true"
+      app.kubernetes.io/host-path-usage: "true"
       app.kubernetes.io/version: 3.13.0
 
 ---
@@ -1601,18 +1555,18 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Poweroff a node in the cluster
+    Injects network latency on pods belonging to an app deployment
 kind: ChaosExperiment
 metadata:
-  name: node-poweroff
+  name: pod-network-latency
   labels:
-    name: node-poweroff
+    name: pod-network-latency
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
 spec:
   definition:
-    scope: Cluster
+    scope: Namespaced
     permissions:
       # Create and monitor the experiment & helper pods
       - apiGroups: [""]
@@ -1631,18 +1585,34 @@ spec:
       - apiGroups: [""]
         resources: ["events"]
         verbs: ["create", "get", "list", "patch", "update"]
-      # Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
+      # Fetch configmaps details and mount it to the experiment pod (if specified)
       - apiGroups: [""]
-        resources: ["configmaps", "secrets"]
+        resources: ["configmaps"]
         verbs: ["get", "list"]
       # Track and get the runner, experiment, and helper pods log
       - apiGroups: [""]
         resources: ["pods/log"]
         verbs: ["get", "list", "watch"]
-      # for creating and managing to execute comands inside target container
+      # for creating and managing to execute commands inside target container
       - apiGroups: [""]
         resources: ["pods/exec"]
         verbs: ["get", "list", "create"]
+      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
+      - apiGroups: ["apps"]
+        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
+        verbs: ["list", "get"]
+      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
+      - apiGroups: ["apps.openshift.io"]
+        resources: ["deploymentconfigs"]
+        verbs: ["list", "get"]
+      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
+      - apiGroups: [""]
+        resources: ["replicationcontrollers"]
+        verbs: ["get", "list"]
+      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
+      - apiGroups: ["argoproj.io"]
+        resources: ["rollouts"]
+        verbs: ["list", "get"]
       # for configuring and monitor the experiment job by the chaos-runner pod
       - apiGroups: ["batch"]
         resources: ["jobs"]
@@ -1651,66 +1621,97 @@ spec:
       - apiGroups: ["litmuschaos.io"]
         resources: ["chaosengines", "chaosexperiments", "chaosresults"]
         verbs: ["create", "list", "get", "patch", "update", "delete"]
-      # for experiment to perform node status checks
-      - apiGroups: [""]
-        resources: ["nodes"]
-        verbs: ["get", "list"]
     image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name node-restart
+      - ./experiments -name pod-network-latency
     command:
       - /bin/bash
     env:
-      - name: SSH_USER
-        value: "root"
+      - name: TARGET_CONTAINER
+        value: ""
 
-      - name: TOTAL_CHAOS_DURATION
-        value: "60"
+      - name: NETWORK_INTERFACE
+        value: "eth0"
 
-      - name: REBOOT_COMMAND
-        value: '-o ServerAliveInterval=1 -o ServerAliveCountMax=1 "sudo systemctl poweroff --force --force" ; true'
+      # provide lib image
+      - name: LIB_IMAGE
+        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
 
-      # Period to wait before and after injection of chaos in sec
+      - name: TC_IMAGE
+        value: "gaiadocker/iproute2"
+
+      - name: NETWORK_LATENCY
+        value: "2000" #in ms
+
+      - name: TOTAL_CHAOS_DURATION
+        value: "60" # in seconds
+
+      # Time period to wait before and after injection of chaos in sec
       - name: RAMP_TIME
         value: ""
 
-      # provide lib image
-      - name: LIB_IMAGE
-        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
+      - name: JITTER
+        value: "0"
 
-      # ENTER THE TARGET NODE NAME
-      - name: TARGET_NODE
+      ## percentage of total pods to target
+      - name: PODS_AFFECTED_PERC
         value: ""
 
-      - name: NODE_LABEL
+      - name: TARGET_PODS
         value: ""
 
-      # ENTER THE TARGET NODE IP
-      - name: TARGET_NODE_IP
+      # provide the name of container runtime
+      # for litmus LIB, it supports docker, containerd, crio
+      # for pumba LIB, it supports docker only
+      - name: CONTAINER_RUNTIME
+        value: "containerd"
+
+      - name: DEFAULT_HEALTH_CHECK
+        value: "false"
+
+      # provide the destination ips
+      # chaos injection will be triggered for these destination ips
+      - name: DESTINATION_IPS
+        value: ""
+
+      # provide the destination hosts
+      # chaos injection will be triggered for these destination hosts
+      - name: DESTINATION_HOSTS
+        value: ""
+
+      # provide the socket file path
+      - name: SOCKET_PATH
+        value: "/run/containerd/containerd.sock"
+
+      # To select pods on specific node(s)
+      - name: NODE_LABEL
         value: ""
 
+      ## it defines the sequence of chaos execution for multiple target pods
+      ## supported values: serial, parallel
+      - name: SEQUENCE
+        value: "parallel"
+
     labels:
-      name: node-poweroff
+      name: pod-network-latency
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
+      app.kubernetes.io/runtime-api-usage: "true"
       app.kubernetes.io/version: 3.13.0
-    secrets:
-      - name: id-rsa
-        mountPath: /mnt/
 
 ---
 ---
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Pod DNS Error injects DNS failure/error in target pod containers
+    Pod DNS Spoof can spoof particular DNS requests in target pod container to desired target hostnames
 kind: ChaosExperiment
 metadata:
-  name: pod-dns-error
+  name: pod-dns-spoof
   labels:
-    name: pod-dns-error
+    name: pod-dns-spoof
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -1774,7 +1775,7 @@ spec:
     image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
     args:
       - -c
-      - ./experiments -name pod-dns-error
+      - ./experiments -name pod-dns-spoof
     command:
       - /bin/bash
     env:
@@ -1815,16 +1816,12 @@ spec:
       - name: SEQUENCE
         value: "parallel"
 
-      # list of the target hostnames or kewywords eg. '["litmuschaos","chaosnative.io"]' . If empty all hostnames are targets
-      - name: TARGET_HOSTNAMES
+      # map of the target hostnames eg. '{"abc.com":"spoofabc.com"}' . If empty no queries will be spoofed
+      - name: SPOOF_MAP
         value: ""
 
-      # can be either exact or substring, determines whether the DNS query has to match exactly with one of the targets or can have any of the targets as substring
-      - name: MATCH_SCHEME
-        value: "exact"
-
     labels:
-      experiment: pod-dns-error
+      experiment: pod-dns-spoof
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/runtime-api-usage: "true"
@@ -1835,18 +1832,18 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Scale the application replicas and test the node autoscaling on cluster
+    Injects 100% network packet loss on pods belonging to an app deployment
 kind: ChaosExperiment
 metadata:
-  name: pod-autoscaler
+  name: pod-network-partition
   labels:
-    name: pod-autoscaler
+    name: pod-network-partition
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
 spec:
   definition:
-    scope: Cluster
+    scope: Namespaced
     permissions:
       # Create and monitor the experiment & helper pods
       - apiGroups: [""]
@@ -1877,14 +1874,14 @@ spec:
       - apiGroups: [""]
         resources: ["pods/exec"]
         verbs: ["get", "list", "create"]
-      # performs CRUD operations on the deployments and statefulsets
-      - apiGroups: ["apps"]
-        resources: ["deployments", "statefulsets"]
-        verbs: ["list", "get", "patch", "update"]
       # for configuring and monitor the experiment job by the chaos-runner pod
       - apiGroups: ["batch"]
         resources: ["jobs"]
         verbs: ["create", "list", "get", "delete", "deletecollection"]
+      # performs CRUD operations on the network policies
+      - apiGroups: ["networking.k8s.io"]
+        resources: ["networkpolicies"]
+        verbs: ["create", "delete", "list", "get"]
       # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
       - apiGroups: ["litmuschaos.io"]
         resources: ["chaosengines", "chaosexperiments", "chaosresults"]
@@ -1893,26 +1890,49 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name pod-autoscaler
+      - ./experiments -name pod-network-partition
     command:
       - /bin/bash
     env:
       - name: TOTAL_CHAOS_DURATION
-        value: "60"
+        value: "60" # in seconds
 
-      # Period to wait before and after injection of chaos in sec
+      # time period to wait before and after injection of chaos in sec
       - name: RAMP_TIME
         value: ""
 
-      # Number of replicas to scale
-      - name: REPLICA_COUNT
-        value: "5"
-
       - name: DEFAULT_HEALTH_CHECK
         value: "false"
 
+      # provide the destination ips
+      # chaos injection will be triggered for these destination ips
+      - name: DESTINATION_IPS
+        value: ""
+
+      # provide the destination hosts
+      # chaos injection will be triggered for these destination hosts
+      - name: DESTINATION_HOSTS
+        value: ""
+
+      # provide network policy type
+      # supports ingress, egress, all values
+      - name: POLICY_TYPES
+        value: "all"
+
+      # provide labels of the destination pods
+      - name: POD_SELECTOR
+        value: ""
+
+      # provide labels of the destination namespaces
+      - name: NAMESPACE_SELECTOR
+        value: ""
+
+      # provide comma separated ports
+      - name: PORTS
+        value: ""
+
     labels:
-      name: pod-autoscaler
+      name: pod-network-partition
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
@@ -1922,18 +1942,18 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Kills the Docker service on the application node to check the resiliency.
+    It injects chaos inside the pod which modifies the status code of the response from the provided application server to desired status code provided by the user and reverts after a specified duration
 kind: ChaosExperiment
 metadata:
-  name: docker-service-kill
+  name: pod-http-status-code
   labels:
-    name: docker-service-kill
+    name: pod-http-status-code
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
 spec:
   definition:
-    scope: Cluster
+    scope: Namespaced
     permissions:
       # Create and monitor the experiment & helper pods
       - apiGroups: [""]
@@ -1964,6 +1984,22 @@ spec:
       - apiGroups: [""]
         resources: ["pods/exec"]
         verbs: ["get", "list", "create"]
+      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
+      - apiGroups: ["apps"]
+        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
+        verbs: ["list", "get"]
+      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
+      - apiGroups: ["apps.openshift.io"]
+        resources: ["deploymentconfigs"]
+        verbs: ["list", "get"]
+      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
+      - apiGroups: [""]
+        resources: ["replicationcontrollers"]
+        verbs: ["get", "list"]
+      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
+      - apiGroups: ["argoproj.io"]
+        resources: ["rollouts"]
+        verbs: ["list", "get"]
       # for configuring and monitor the experiment job by the chaos-runner pod
       - apiGroups: ["batch"]
         resources: ["jobs"]
@@ -1972,44 +2008,104 @@ spec:
       - apiGroups: ["litmuschaos.io"]
         resources: ["chaosengines", "chaosexperiments", "chaosresults"]
         verbs: ["create", "list", "get", "patch", "update", "delete"]
-      # for experiment to perform node status checks
-      - apiGroups: [""]
-        resources: ["nodes"]
-        verbs: ["get", "list"]
     image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name docker-service-kill
+      - ./experiments -name pod-http-status-code
     command:
       - /bin/bash
     env:
-      - name: TOTAL_CHAOS_DURATION
-        value: "90" # in seconds
+      - name: TARGET_CONTAINER
+        value: ""
 
-      # Period to wait before injection of chaos in sec
-      - name: RAMP_TIME
+      # provide lib image
+      - name: LIB_IMAGE
+        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
+
+      # modified status code for the HTTP response
+      # if no value is provided, a random status code from the supported code list will be selected
+      # if an invalid status code is provided, the experiment will fail
+      # supported status code list: [200, 201, 202, 204, 300, 301, 302, 304, 307, 400, 401, 403, 404, 500, 501, 502, 503, 504]
+      - name: STATUS_CODE
         value: ""
 
-      - name: NODE_LABEL
+      #  whether to modify the body as per the status code provided
+      - name: "MODIFY_RESPONSE_BODY"
+        value: "true"
+
+      # provide the body string to overwrite the response body. This will be used only if MODIFY_RESPONSE_BODY is set to true
+      - name: RESPONSE_BODY
         value: ""
 
-      # provide lib image
-      - name: LIB_IMAGE
-        value: "ubuntu:16.04"
+      # provide the encoding type for the response body
+      # currently supported value are gzip, deflate
+      # if empty no encoding will be applied
+      - name: CONTENT_ENCODING
+        value: ""
 
-      # provide the target node name
-      - name: TARGET_NODE
+      # provide the content type for the response body
+      - name: CONTENT_TYPE
+        value: "text/plain"
+
+      # port of the target service
+      - name: TARGET_SERVICE_PORT
+        value: "80"
+
+      # toxicity is the probability of the request to be affected
+      # provide the percentage value in the range of 0-100
+      # 0 means no requests will be affected and 100 means all requests will be affected
+      - name: TOXICITY
+        value: "100"
+
+      # port on which the proxy will listen
+      - name: PROXY_PORT
+        value: "20000"
+
+      # network interface on which the proxy will listen
+      - name: NETWORK_INTERFACE
+        value: "eth0"
+
+      - name: TOTAL_CHAOS_DURATION
+        value: "60" # in seconds
+
+      # Time period to wait before and after injection of chaos in sec
+      - name: RAMP_TIME
         value: ""
 
       - name: DEFAULT_HEALTH_CHECK
         value: "false"
 
+      # percentage of total pods to target
+      - name: PODS_AFFECTED_PERC
+        value: ""
+
+      - name: TARGET_PODS
+        value: ""
+
+      # provide the name of container runtime
+      # for litmus LIB, it supports docker, containerd, crio
+      # for pumba LIB, it supports docker only
+      - name: CONTAINER_RUNTIME
+        value: "containerd"
+
+      # provide the socket file path
+      - name: SOCKET_PATH
+        value: "/run/containerd/containerd.sock"
+
+      # To select pods on specific node(s)
+      - name: NODE_LABEL
+        value: ""
+
+      ## it defines the sequence of chaos execution for multiple target pods
+      ## supported values: serial, parallel
+      - name: SEQUENCE
+        value: "parallel"
+
     labels:
-      name: docker-service-kill
+      name: pod-http-status-code
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
-      app.kubernetes.io/service-kill: "true"
       app.kubernetes.io/version: 3.13.0
 
 ---
@@ -2017,12 +2113,12 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    It injects chaos inside the pod which modifies the status code of the response from the provided application server to desired status code provided by the user and reverts after a specified duration
+    IO stress on app pods belonging to an app deployment
 kind: ChaosExperiment
 metadata:
-  name: pod-http-status-code
+  name: pod-io-stress
   labels:
-    name: pod-http-status-code
+    name: pod-io-stress
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -2087,80 +2183,58 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name pod-http-status-code
+      - ./experiments -name pod-io-stress
     command:
       - /bin/bash
     env:
-      - name: TARGET_CONTAINER
-        value: ""
-
-      # provide lib image
-      - name: LIB_IMAGE
-        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
-
-      # modified status code for the HTTP response
-      # if no value is provided, a random status code from the supported code list will selected
-      # if an invalid status code is provided, the experiment will fail
-      # supported status code list: [200, 201, 202, 204, 300, 301, 302, 304, 307, 400, 401, 403, 404, 500, 501, 502, 503, 504]
-      - name: STATUS_CODE
-        value: ""
-
-      #  whether to modify the body as per the status code provided
-      - name: "MODIFY_RESPONSE_BODY"
-        value: "true"
+      - name: TOTAL_CHAOS_DURATION
+        value: "120"
 
-      # provide the body string to overwrite the response body. This will be used only if MODIFY_RESPONSE_BODY is set to true
-      - name: RESPONSE_BODY
-        value: ""
+      ## specify the size as percentage of free space on the file system
+      ## default value 10 (in percentage)
+      - name: FILESYSTEM_UTILIZATION_PERCENTAGE
+        value: "10"
 
-      # provide the encoding type for the response body
-      # currently supported value are gzip, deflate
-      # if empty no encoding will be applied
-      - name: CONTENT_ENCODING
+      ## we can specify the size in Gigabyte (Gb) also in place of percentage of free space
+      ## NOTE: for selecting this option FILESYSTEM_UTILIZATION_PERCENTAGE should be empty
+      - name: FILESYSTEM_UTILIZATION_BYTES
         value: ""
 
-      # provide the content type for the response body
-      - name: CONTENT_TYPE
-        value: "text/plain"
-
-      # port of the target service
-      - name: TARGET_SERVICE_PORT
-        value: "80"
-
-      # toxicity is the probability of the request to be affected
-      # provide the percentage value in the range of 0-100
-      # 0 means no request will be affected and 100 means all request will be affected
-      - name: TOXICITY
-        value: "100"
-
-      # port on which the proxy will listen
-      - name: PROXY_PORT
-        value: "20000"
-
-      # network interface on which the proxy will listen
-      - name: NETWORK_INTERFACE
-        value: "eth0"
-
-      - name: TOTAL_CHAOS_DURATION
-        value: "60" # in seconds
+      ## Total number of workers default value is 4
+      - name: NUMBER_OF_WORKERS
+        value: "4"
 
-      # Time period to wait before and after injection of chaos in sec
-      - name: RAMP_TIME
+      ## Percentage of total pods to target
+      - name: PODS_AFFECTED_PERC
         value: ""
 
-      - name: DEFAULT_HEALTH_CHECK
-        value: "false"
+      # provide volume mount path
+      - name: VOLUME_MOUNT_PATH
+        value: ""
 
-      # percentage of total pods to target
-      - name: PODS_AFFECTED_PERC
+      - name: TARGET_CONTAINER
         value: ""
 
+      ## specify the comma separated target pods
       - name: TARGET_PODS
         value: ""
 
-      # provide the name of container runtime
-      # for litmus LIB, it supports docker, containerd, crio
-      # for pumba LIB, it supports docker only
+      # To select pods on specific node(s)
+      - name: NODE_LABEL
+        value: ""
+
+      # Period to wait before and after injection of chaos in sec
+      - name: RAMP_TIME
+        value: ""
+
+      # provide lib image
+      - name: LIB_IMAGE
+        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
+
+      - name: DEFAULT_HEALTH_CHECK
+        value: "false"
+
+      ## provide the cluster runtime
       - name: CONTAINER_RUNTIME
         value: "containerd"
 
@@ -2168,19 +2242,16 @@ spec:
       - name: SOCKET_PATH
         value: "/run/containerd/containerd.sock"
 
-      # To select pods on specific node(s)
-      - name: NODE_LABEL
-        value: ""
-
       ## it defines the sequence of chaos execution for multiple target pods
       ## supported values: serial, parallel
       - name: SEQUENCE
         value: "parallel"
 
     labels:
-      name: pod-http-status-code
+      name: pod-io-stress
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
+      app.kubernetes.io/runtime-api-usage: "true"
       app.kubernetes.io/version: 3.13.0
 
 ---
@@ -2188,18 +2259,18 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Deletes a pod belonging to a deployment/statefulset/daemonset
+    Gives a memory hog on a node belonging to a deployment
 kind: ChaosExperiment
 metadata:
-  name: pod-delete
+  name: node-memory-hog
   labels:
-    name: pod-delete
+    name: node-memory-hog
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
 spec:
   definition:
-    scope: Namespaced
+    scope: Cluster
     permissions:
       # Create and monitor the experiment & helper pods
       - apiGroups: [""]
@@ -2230,22 +2301,6 @@ spec:
       - apiGroups: [""]
         resources: ["pods/exec"]
         verbs: ["get", "list", "create"]
-      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
-      - apiGroups: ["apps"]
-        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
-        verbs: ["list", "get"]
-      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
-      - apiGroups: ["apps.openshift.io"]
-        resources: ["deploymentconfigs"]
-        verbs: ["list", "get"]
-      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
-      - apiGroups: [""]
-        resources: ["replicationcontrollers"]
-        verbs: ["get", "list"]
-      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
-      - apiGroups: ["argoproj.io"]
-        resources: ["rollouts"]
-        verbs: ["list", "get"]
       # for configuring and monitor the experiment job by the chaos-runner pod
       - apiGroups: ["batch"]
         resources: ["jobs"]
@@ -2254,51 +2309,63 @@ spec:
       - apiGroups: ["litmuschaos.io"]
         resources: ["chaosengines", "chaosexperiments", "chaosresults"]
         verbs: ["create", "list", "get", "patch", "update", "delete"]
+      # for experiment to perform node status checks
+      - apiGroups: [""]
+        resources: ["nodes"]
+        verbs: ["get", "list"]
     image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name pod-delete
+      - ./experiments -name node-memory-hog
     command:
       - /bin/bash
     env:
       - name: TOTAL_CHAOS_DURATION
-        value: "15"
+        value: "120"
 
-      # Period to wait before and after injection of chaos in sec
-      - name: RAMP_TIME
+      ## Specify the size as percent of total node capacity Ex: '30'
+      ## NOTE: for selecting this option keep MEMORY_CONSUMPTION_MEBIBYTES empty
+      - name: MEMORY_CONSUMPTION_PERCENTAGE
         value: ""
 
-      - name: FORCE
-        value: "true"
+      ## Specify the amount of memory to be consumed in mebibytes
+      ## NOTE: for selecting this option keep MEMORY_CONSUMPTION_PERCENTAGE empty
+      - name: MEMORY_CONSUMPTION_MEBIBYTES
+        value: ""
 
-      - name: CHAOS_INTERVAL
-        value: "5"
+      - name: NUMBER_OF_WORKERS
+        value: "1"
 
-      ## percentage of total pods to target
-      - name: PODS_AFFECTED_PERC
+      # ENTER THE COMMA SEPARATED TARGET NODES NAME
+      - name: TARGET_NODES
         value: ""
 
-      - name: TARGET_CONTAINER
+      - name: NODE_LABEL
         value: ""
 
-      - name: TARGET_PODS
+      # Period to wait before and after injection of chaos in sec
+      - name: RAMP_TIME
         value: ""
 
+      # provide lib image
+      - name: LIB_IMAGE
+        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
+
       - name: DEFAULT_HEALTH_CHECK
         value: "false"
 
-      # To select pods on specific node(s)
-      - name: NODE_LABEL
+      ## percentage of total nodes to target
+      - name: NODES_AFFECTED_PERC
         value: ""
 
-      ## it defines the sequence of chaos execution for multiple target pods
+      ## it defines the sequence of chaos execution for multiple target nodes
       ## supported values: serial, parallel
       - name: SEQUENCE
         value: "parallel"
 
     labels:
-      name: pod-delete
+      name: node-memory-hog
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
@@ -2308,18 +2375,18 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Drain the node where application pod is scheduled
+    Injects CPU consumption on pods belonging to an app deployment
 kind: ChaosExperiment
 metadata:
-  name: node-drain
+  name: pod-cpu-hog
   labels:
-    name: node-drain
+    name: pod-cpu-hog
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
 spec:
   definition:
-    scope: Cluster
+    scope: Namespaced
     permissions:
       # Create and monitor the experiment & helper pods
       - apiGroups: [""]
@@ -2348,12 +2415,24 @@ spec:
         verbs: ["get", "list", "watch"]
       # for creating and managing to execute commands inside target container
       - apiGroups: [""]
-        resources: ["pods/exec", "pods/eviction"]
+        resources: ["pods/exec"]
         verbs: ["get", "list", "create"]
-      # ignore daemonsets while draining the node
+      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
       - apiGroups: ["apps"]
-        resources: ["daemonsets"]
-        verbs: ["list", "get", "delete"]
+        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
+        verbs: ["list", "get"]
+      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
+      - apiGroups: ["apps.openshift.io"]
+        resources: ["deploymentconfigs"]
+        verbs: ["list", "get"]
+      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
+      - apiGroups: [""]
+        resources: ["replicationcontrollers"]
+        verbs: ["get", "list"]
+      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
+      - apiGroups: ["argoproj.io"]
+        resources: ["rollouts"]
+        verbs: ["list", "get"]
       # for configuring and monitor the experiment job by the chaos-runner pod
       - apiGroups: ["batch"]
         resources: ["jobs"]
@@ -2362,38 +2441,73 @@ spec:
       - apiGroups: ["litmuschaos.io"]
         resources: ["chaosengines", "chaosexperiments", "chaosresults"]
         verbs: ["create", "list", "get", "patch", "update", "delete"]
-      # for experiment to perform node status checks
-      - apiGroups: [""]
-        resources: ["nodes"]
-        verbs: ["get", "list", "patch"]
     image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name node-drain
+      - ./experiments -name pod-cpu-hog
     command:
       - /bin/bash
     env:
-      - name: TARGET_NODE
+      - name: TOTAL_CHAOS_DURATION
+        value: "60"
+
+      ## Number of CPU cores to stress
+      - name: CPU_CORES
+        value: "1"
+
+      ## LOAD CPU WITH GIVEN PERCENT LOADING FOR THE CPU STRESS WORKERS.
+      ## 0 IS EFFECTIVELY A SLEEP (NO LOAD) AND 100 IS FULL LOADING
+      - name: CPU_LOAD
+        value: "100"
+
+      ## Percentage of total pods to target
+      - name: PODS_AFFECTED_PERC
         value: ""
 
-      - name: NODE_LABEL
+      ## Period to wait before and after injection of chaos in sec
+      - name: RAMP_TIME
         value: ""
 
-      - name: TOTAL_CHAOS_DURATION
-        value: "60"
+      ## It is used in pumba lib only
+      - name: LIB_IMAGE
+        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
 
-      # Period to wait before and after injection of chaos in sec
-      - name: RAMP_TIME
+      ## It is used in pumba lib only
+      - name: STRESS_IMAGE
+        value: "alexeiled/stress-ng:latest-ubuntu"
+
+      ## provide the cluster runtime
+      - name: CONTAINER_RUNTIME
+        value: "containerd"
+
+      # provide the socket file path
+      - name: SOCKET_PATH
+        value: "/run/containerd/containerd.sock"
+
+      - name: TARGET_CONTAINER
+        value: ""
+
+      - name: TARGET_PODS
         value: ""
 
       - name: DEFAULT_HEALTH_CHECK
         value: "false"
 
+      # To select pods on specific node(s)
+      - name: NODE_LABEL
+        value: ""
+
+      ## it defines the sequence of chaos execution for multiple target pods
+      ## supported values: serial, parallel
+      - name: SEQUENCE
+        value: "parallel"
+
     labels:
-      name: node-drain
+      name: pod-cpu-hog
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
+      app.kubernetes.io/runtime-api-usage: "true"
       app.kubernetes.io/version: 3.13.0
 
 ---
@@ -2401,12 +2515,12 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Injects memory consumption on pods belonging to an app deployment
+    Deletes a pod belonging to a deployment/statefulset/daemonset
 kind: ChaosExperiment
 metadata:
-  name: pod-memory-hog
+  name: pod-delete
   labels:
-    name: pod-memory-hog
+    name: pod-delete
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -2471,52 +2585,26 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name pod-memory-hog
+      - ./experiments -name pod-delete
     command:
       - /bin/bash
     env:
       - name: TOTAL_CHAOS_DURATION
-        value: "60"
-
-      ## enter the amount of memory in megabytes to be consumed by the application pod
-      - name: MEMORY_CONSUMPTION
-        value: "500"
-
-      ## Number of workers to perform stress
-      - name: NUMBER_OF_WORKERS
-        value: "1"
-
-      ## percentage of total pods to target
-      - name: PODS_AFFECTED_PERC
-        value: ""
+        value: "15"
 
-      ## Period to wait before and after injection of chaos in sec
+      # Period to wait before and after injection of chaos in sec
       - name: RAMP_TIME
         value: ""
 
-      ## It is used in pumba lib only
-      - name: LIB_IMAGE
-        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
-
-      ## It is used in pumba lib only
-      - name: STRESS_IMAGE
-        value: "alexeiled/stress-ng:latest-ubuntu"
-
-      ## provide the cluster runtime
-      - name: CONTAINER_RUNTIME
-        value: "containerd"
-
-      # provide the socket file path
-      - name: SOCKET_PATH
-        value: "/run/containerd/containerd.sock"
+      - name: FORCE
+        value: "true"
 
-      ## it defines the sequence of chaos execution for multiple target pods
-      ## supported values: serial, parallel
-      - name: SEQUENCE
-        value: "parallel"
+      - name: CHAOS_INTERVAL
+        value: "5"
 
-      - name: DEFAULT_HEALTH_CHECK
-        value: "false"
+      ## percentage of total pods to target
+      - name: PODS_AFFECTED_PERC
+        value: ""
 
       - name: TARGET_CONTAINER
         value: ""
@@ -2524,15 +2612,22 @@ spec:
       - name: TARGET_PODS
         value: ""
 
+      - name: DEFAULT_HEALTH_CHECK
+        value: "false"
+
       # To select pods on specific node(s)
       - name: NODE_LABEL
         value: ""
 
+      ## it defines the sequence of chaos execution for multiple target pods
+      ## supported values: serial, parallel
+      - name: SEQUENCE
+        value: "parallel"
+
     labels:
-      name: pod-memory-hog
+      name: pod-delete
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
-      app.kubernetes.io/runtime-api-usage: "true"
       app.kubernetes.io/version: 3.13.0
 
 ---
@@ -2540,18 +2635,18 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Taint the node where application pod is scheduled
+    It injects chaos into the pod which stops outgoing HTTP requests by resetting the TCP connection and then reverts back to the original state after a specified duration
 kind: ChaosExperiment
 metadata:
-  name: node-taint
+  name: pod-http-reset-peer
   labels:
-    name: node-taint
+    name: pod-http-reset-peer
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
 spec:
   definition:
-    scope: Cluster
+    scope: Namespaced
     permissions:
       # Create and monitor the experiment & helper pods
       - apiGroups: [""]
@@ -2580,12 +2675,24 @@ spec:
         verbs: ["get", "list", "watch"]
       # for creating and managing to execute commands inside target container
       - apiGroups: [""]
-        resources: ["pods/exec", "pods/eviction"]
+        resources: ["pods/exec"]
         verbs: ["get", "list", "create"]
-      # ignore daemonsets while draining the node
+      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
       - apiGroups: ["apps"]
-        resources: ["daemonsets"]
-        verbs: ["list", "get", "delete"]
+        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
+        verbs: ["list", "get"]
+      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
+      - apiGroups: ["apps.openshift.io"]
+        resources: ["deploymentconfigs"]
+        verbs: ["list", "get"]
+      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
+      - apiGroups: [""]
+        resources: ["replicationcontrollers"]
+        verbs: ["get", "list"]
+      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
+      - apiGroups: ["argoproj.io"]
+        resources: ["rollouts"]
+        verbs: ["list", "get"]
       # for configuring and monitor the experiment job by the chaos-runner pod
       - apiGroups: ["batch"]
         resources: ["jobs"]
@@ -2594,41 +2701,81 @@ spec:
       - apiGroups: ["litmuschaos.io"]
         resources: ["chaosengines", "chaosexperiments", "chaosresults"]
         verbs: ["create", "list", "get", "patch", "update", "delete"]
-      # for experiment to perform node status checks
-      - apiGroups: [""]
-        resources: ["nodes"]
-        verbs: ["get", "list", "patch", "update"]
     image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name node-taint
+      - ./experiments -name pod-http-reset-peer
     command:
       - /bin/bash
     env:
-      - name: TARGET_NODE
+      - name: TARGET_CONTAINER
         value: ""
 
-      - name: NODE_LABEL
-        value: ""
+      # provide lib image
+      - name: LIB_IMAGE
+        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
+
+      # reset timeout specifies after how much duration to reset the connection
+      - name: RESET_TIMEOUT
+        value: "0" #in ms
+
+      # port of the target service
+      - name: TARGET_SERVICE_PORT
+        value: "80"
+
+      # toxicity is the probability of the request to be affected
+      # provide the percentage value in the range of 0-100
+      # 0 means no requests will be affected and 100 means all requests will be affected
+      - name: TOXICITY
+        value: "100"
+
+      # port on which the proxy will listen
+      - name: PROXY_PORT
+        value: "20000"
+
+      # network interface on which the proxy will listen
+      - name: NETWORK_INTERFACE
+        value: "eth0"
 
       - name: TOTAL_CHAOS_DURATION
-        value: "60"
+        value: "60" # in seconds
 
-      # Period to wait before and after injection of chaos in sec
+      # Time period to wait before and after injection of chaos in sec
       - name: RAMP_TIME
         value: ""
 
-      # set taint label & effect
-      # key=value:effect or key:effect
-      - name: TAINTS
-        value: ""
-
       - name: DEFAULT_HEALTH_CHECK
         value: "false"
 
+      # percentage of total pods to target
+      - name: PODS_AFFECTED_PERC
+        value: ""
+
+      - name: TARGET_PODS
+        value: ""
+
+      # provide the name of container runtime
+      # for litmus LIB, it supports docker, containerd, crio
+      # for pumba LIB, it supports docker only
+      - name: CONTAINER_RUNTIME
+        value: "containerd"
+
+      # provide the socket file path
+      - name: SOCKET_PATH
+        value: "/run/containerd/containerd.sock"
+
+      # To select pods on specific node(s)
+      - name: NODE_LABEL
+        value: ""
+
+      ## it defines the sequence of chaos execution for multiple target pods
+      ## supported values: serial, parallel
+      - name: SEQUENCE
+        value: "parallel"
+
     labels:
-      name: node-taint
+      name: pod-http-reset-peer
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
@@ -2638,12 +2785,12 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Injects CPU consumption on pods belonging to an app deployment
+    Injects network packet loss on pods belonging to an app deployment
 kind: ChaosExperiment
 metadata:
-  name: pod-cpu-hog
+  name: pod-network-loss
   labels:
-    name: pod-cpu-hog
+    name: pod-network-loss
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -2708,66 +2855,74 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name pod-cpu-hog
+      - ./experiments -name pod-network-loss
     command:
       - /bin/bash
     env:
-      - name: TOTAL_CHAOS_DURATION
-        value: "60"
-
-      ## Number of CPU cores to stress
-      - name: CPU_CORES
-        value: "1"
-
-      ## LOAD CPU WITH GIVEN PERCENT LOADING FOR THE CPU STRESS WORKERS.
-      ## 0 IS EFFECTIVELY A SLEEP (NO LOAD) AND 100 IS FULL LOADING
-      - name: CPU_LOAD
-        value: "100"
-
-      ## Percentage of total pods to target
-      - name: PODS_AFFECTED_PERC
-        value: ""
-
-      ## Period to wait before and after injection of chaos in sec
-      - name: RAMP_TIME
+      - name: TARGET_CONTAINER
         value: ""
 
-      ## It is used in pumba lib only
+      # provide lib image
       - name: LIB_IMAGE
         value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
 
-      ## It is used in pumba lib only
-      - name: STRESS_IMAGE
-        value: "alexeiled/stress-ng:latest-ubuntu"
+      - name: NETWORK_INTERFACE
+        value: "eth0"
 
-      ## provide the cluster runtime
-      - name: CONTAINER_RUNTIME
-        value: "containerd"
+      - name: TC_IMAGE
+        value: "gaiadocker/iproute2"
 
-      # provide the socket file path
-      - name: SOCKET_PATH
-        value: "/run/containerd/containerd.sock"
+      - name: NETWORK_PACKET_LOSS_PERCENTAGE
+        value: "100" #in PERCENTAGE
 
-      - name: TARGET_CONTAINER
+      - name: TOTAL_CHAOS_DURATION
+        value: "60" # in seconds
+
+      # time period to wait before and after injection of chaos in sec
+      - name: RAMP_TIME
         value: ""
 
-      - name: TARGET_PODS
+      ## percentage of total pods to target
+      - name: PODS_AFFECTED_PERC
         value: ""
 
       - name: DEFAULT_HEALTH_CHECK
         value: "false"
 
+      - name: TARGET_PODS
+        value: ""
+
       # To select pods on specific node(s)
       - name: NODE_LABEL
         value: ""
 
+      # provide the name of container runtime
+      # for litmus LIB, it supports docker, containerd, crio
+      # for pumba LIB, it supports docker only
+      - name: CONTAINER_RUNTIME
+        value: "containerd"
+
+      # provide the destination ips
+      # chaos injection will be triggered for these destination ips
+      - name: DESTINATION_IPS
+        value: ""
+
+      # provide the destination hosts
+      # chaos injection will be triggered for these destination hosts
+      - name: DESTINATION_HOSTS
+        value: ""
+
+      # provide the socket file path
+      - name: SOCKET_PATH
+        value: "/run/containerd/containerd.sock"
+
       ## it defines the sequence of chaos execution for multiple target pods
       ## supported values: serial, parallel
       - name: SEQUENCE
         value: "parallel"
 
     labels:
-      name: pod-cpu-hog
+      name: pod-network-loss
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/runtime-api-usage: "true"
@@ -2778,12 +2933,12 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Injects 100% network packet loss on pods belonging to an app deployment
+    Injects CPU consumption on pods belonging to an app deployment
 kind: ChaosExperiment
 metadata:
-  name: pod-network-partition
+  name: pod-cpu-hog-exec
   labels:
-    name: pod-network-partition
+    name: pod-cpu-hog-exec
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -2820,14 +2975,26 @@ spec:
       - apiGroups: [""]
         resources: ["pods/exec"]
         verbs: ["get", "list", "create"]
+      # deriving the parent/owner details of the pod (if parent is any of {deployment, statefulset, daemonsets})
+      - apiGroups: ["apps"]
+        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
+        verbs: ["list", "get"]
+      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
+      - apiGroups: ["apps.openshift.io"]
+        resources: ["deploymentconfigs"]
+        verbs: ["list", "get"]
+      # deriving the parent/owner details of the pod (if parent is replicationController)
+      - apiGroups: [""]
+        resources: ["replicationcontrollers"]
+        verbs: ["get", "list"]
+      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
+      - apiGroups: ["argoproj.io"]
+        resources: ["rollouts"]
+        verbs: ["list", "get"]
       # for configuring and monitor the experiment job by the chaos-runner pod
       - apiGroups: ["batch"]
         resources: ["jobs"]
         verbs: ["create", "list", "get", "delete", "deletecollection"]
-      # performs CRUD operations on the network policies
-      - apiGroups: ["networking.k8s.io"]
-        resources: ["networkpolicies"]
-        verbs: ["create", "delete", "list", "get"]
       # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
       - apiGroups: ["litmuschaos.io"]
         resources: ["chaosengines", "chaosexperiments", "chaosresults"]
@@ -2836,49 +3003,45 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name pod-network-partition
+      - ./experiments -name pod-cpu-hog-exec
     command:
       - /bin/bash
     env:
       - name: TOTAL_CHAOS_DURATION
-        value: "60" # in seconds
-
-      # time period to wait before and after injection of chaos in sec
-      - name: RAMP_TIME
-        value: ""
+        value: "60"
 
-      - name: DEFAULT_HEALTH_CHECK
-        value: "false"
+      ## Number of CPU cores to stress
+      - name: CPU_CORES
+        value: "1"
 
-      # provide the destination ips
-      # chaos injection will be triggered for these destination ips
-      - name: DESTINATION_IPS
+      ## Percentage of total pods to target
+      - name: PODS_AFFECTED_PERC
         value: ""
 
-      # provide the destination hosts
-      # chaos injection will be triggered for these destination hosts
-      - name: DESTINATION_HOSTS
+      ## Period to wait before and after injection of chaos in sec
+      - name: RAMP_TIME
         value: ""
 
-      # provide network policy type
-      # support ingress, egress, all values
-      - name: POLICY_TYPES
-        value: "all"
+      # The command to kill the chaos process
+      - name: CHAOS_KILL_COMMAND
+        value: "kill $(find /proc -name exe -lname '*/md5sum' 2>&1 | grep -v 'Permission denied' | awk -F/ '{print $(NF-1)}')"
 
-      # provide labels of the destination pods
-      - name: POD_SELECTOR
+      - name: TARGET_CONTAINER
         value: ""
 
-      # provide labels the destination namespaces
-      - name: NAMESPACE_SELECTOR
+      - name: TARGET_PODS
         value: ""
 
-      # provide comma separated ports
-      - name: PORTS
-        value: ""
+      - name: DEFAULT_HEALTH_CHECK
+        value: "false"
+
+      ## it defines the sequence of chaos execution for multiple target pods
+      ## supported values: serial, parallel
+      - name: SEQUENCE
+        value: "parallel"
 
     labels:
-      name: pod-network-partition
+      name: pod-cpu-hog-exec
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
@@ -2888,12 +3051,12 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Restart node
+    Taint the node where application pod is scheduled
 kind: ChaosExperiment
 metadata:
-  name: node-restart
+  name: node-taint
   labels:
-    name: node-restart
+    name: node-taint
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -2918,9 +3081,9 @@ spec:
       - apiGroups: [""]
         resources: ["events"]
         verbs: ["create", "get", "list", "patch", "update"]
-      # Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
+      # Fetch configmaps details and mount it to the experiment pod (if specified)
       - apiGroups: [""]
-        resources: ["configmaps", "secrets"]
+        resources: ["configmaps"]
         verbs: ["get", "list"]
       # Track and get the runner, experiment, and helper pods log
       - apiGroups: [""]
@@ -2928,8 +3091,12 @@ spec:
         verbs: ["get", "list", "watch"]
       # for creating and managing to execute commands inside target container
       - apiGroups: [""]
-        resources: ["pods/exec"]
+        resources: ["pods/exec", "pods/eviction"]
         verbs: ["get", "list", "create"]
+      # ignore daemonsets while evicting pods from the tainted node
+      - apiGroups: ["apps"]
+        resources: ["daemonsets"]
+        verbs: ["list", "get", "delete"]
       # for configuring and monitor the experiment job by the chaos-runner pod
       - apiGroups: ["batch"]
         resources: ["jobs"]
@@ -2941,19 +3108,20 @@ spec:
       # for experiment to perform node status checks
       - apiGroups: [""]
         resources: ["nodes"]
-        verbs: ["get", "list"]
+        verbs: ["get", "list", "patch", "update"]
     image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name node-restart
+      - ./experiments -name node-taint
     command:
       - /bin/bash
     env:
-      - name: SSH_USER
-        value: "root"
-      - name: REBOOT_COMMAND
-        value: "sudo systemctl reboot; true"
+      - name: TARGET_NODE
+        value: ""
+
+      - name: NODE_LABEL
+        value: ""
 
       - name: TOTAL_CHAOS_DURATION
         value: "60"
@@ -2962,44 +3130,31 @@ spec:
       - name: RAMP_TIME
         value: ""
 
-      # provide lib image
-      - name: LIB_IMAGE
-        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
-
-      # ENTER THE TARGET NODE NAME
-      - name: TARGET_NODE
-        value: ""
-
-      - name: NODE_LABEL
-        value: ""
-
-      # ENTER THE TARGET NODE IP
-      - name: TARGET_NODE_IP
+      # set taint label & effect
+      # key=value:effect or key:effect
+      - name: TAINTS
         value: ""
 
       - name: DEFAULT_HEALTH_CHECK
         value: "false"
 
     labels:
-      name: node-restart
+      name: node-taint
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
-    secrets:
-      - name: id-rsa
-        mountPath: /mnt/
 
 ---
 ---
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Injects network packet loss on pods belonging to an app deployment
+    Injects HTTP request latency on pods belonging to an app deployment
 kind: ChaosExperiment
 metadata:
-  name: pod-network-loss
+  name: pod-http-latency
   labels:
-    name: pod-network-loss
+    name: pod-http-latency
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -3064,7 +3219,7 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name pod-network-loss
+      - ./experiments -name pod-http-latency
     command:
       - /bin/bash
     env:
@@ -3075,34 +3230,42 @@ spec:
       - name: LIB_IMAGE
         value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
 
-      - name: NETWORK_INTERFACE
-        value: "eth0"
+      - name: LATENCY
+        value: "2000" #in ms
 
-      - name: TC_IMAGE
-        value: "gaiadocker/iproute2"
+      # port of the target service
+      - name: TARGET_SERVICE_PORT
+        value: "80"
 
-      - name: NETWORK_PACKET_LOSS_PERCENTAGE
-        value: "100" #in PERCENTAGE
+      # toxicity is the probability of the request to be affected
+      # provide the percentage value in the range of 0-100
+      # 0 means no request will be affected and 100 means all requests will be affected
+      - name: TOXICITY
+        value: "100"
+
+      # port on which the proxy will listen
+      - name: PROXY_PORT
+        value: "20000"
+
+      # network interface on which the proxy will listen
+      - name: NETWORK_INTERFACE
+        value: "eth0"
 
       - name: TOTAL_CHAOS_DURATION
         value: "60" # in seconds
 
-      # time period to wait before and after injection of chaos in sec
+      # Time period to wait before and after injection of chaos in sec
       - name: RAMP_TIME
         value: ""
 
-      ## percentage of total pods to target
-      - name: PODS_AFFECTED_PERC
-        value: ""
-
       - name: DEFAULT_HEALTH_CHECK
         value: "false"
 
-      - name: TARGET_PODS
+      # percentage of total pods to target
+      - name: PODS_AFFECTED_PERC
         value: ""
 
-      # To select pods on specific node(s)
-      - name: NODE_LABEL
+      - name: TARGET_PODS
         value: ""
 
       # provide the name of container runtime
@@ -3111,155 +3274,24 @@ spec:
       - name: CONTAINER_RUNTIME
         value: "containerd"
 
-      # provide the destination ips
-      # chaos injection will be triggered for these destination ips
-      - name: DESTINATION_IPS
-        value: ""
-
-      # provide the destination hosts
-      # chaos injection will be triggered for these destination hosts
-      - name: DESTINATION_HOSTS
-        value: ""
-
       # provide the socket file path
       - name: SOCKET_PATH
         value: "/run/containerd/containerd.sock"
 
-      ## it defines the sequence of chaos execution for multiple target pods
-      ## supported values: serial, parallel
-      - name: SEQUENCE
-        value: "parallel"
-
-    labels:
-      name: pod-network-loss
-      app.kubernetes.io/part-of: litmus
-      app.kubernetes.io/component: experiment-job
-      app.kubernetes.io/runtime-api-usage: "true"
-      app.kubernetes.io/version: 3.13.0
-
----
----
-apiVersion: litmuschaos.io/v1alpha1
-description:
-  message: |
-    Give IO disk stress on a node belonging to a deployment
-kind: ChaosExperiment
-metadata:
-  name: node-io-stress
-  labels:
-    name: node-io-stress
-    app.kubernetes.io/part-of: litmus
-    app.kubernetes.io/component: chaosexperiment
-    app.kubernetes.io/version: 3.13.0
-spec:
-  definition:
-    scope: Cluster
-    permissions:
-      # Create and monitor the experiment & helper pods
-      - apiGroups: [""]
-        resources: ["pods"]
-        verbs:
-          [
-            "create",
-            "delete",
-            "get",
-            "list",
-            "patch",
-            "update",
-            "deletecollection",
-          ]
-      # Performs CRUD operations on the events inside chaosengine and chaosresult
-      - apiGroups: [""]
-        resources: ["events"]
-        verbs: ["create", "get", "list", "patch", "update"]
-      # Fetch configmaps details and mount it to the experiment pod (if specified)
-      - apiGroups: [""]
-        resources: ["configmaps"]
-        verbs: ["get", "list"]
-      # Track and get the runner, experiment, and helper pods log
-      - apiGroups: [""]
-        resources: ["pods/log"]
-        verbs: ["get", "list", "watch"]
-      # for creating and managing to execute commands inside target container
-      - apiGroups: [""]
-        resources: ["pods/exec"]
-        verbs: ["get", "list", "create"]
-      # for configuring and monitor the experiment job by the chaos-runner pod
-      - apiGroups: ["batch"]
-        resources: ["jobs"]
-        verbs: ["create", "list", "get", "delete", "deletecollection"]
-      # for creation, status polling and deletion of litmus chaos resources used within a chaos workflow
-      - apiGroups: ["litmuschaos.io"]
-        resources: ["chaosengines", "chaosexperiments", "chaosresults"]
-        verbs: ["create", "list", "get", "patch", "update", "delete"]
-      # for experiment to perform node status checks
-      - apiGroups: [""]
-        resources: ["nodes"]
-        verbs: ["get", "list"]
-    image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
-    imagePullPolicy: Always
-    args:
-      - -c
-      - ./experiments -name node-io-stress
-    command:
-      - /bin/bash
-    env:
-      - name: TOTAL_CHAOS_DURATION
-        value: "120"
-
-      ## specify the size as percentage of free space on the file system
-      ## default value 90 (in percentage)
-      - name: FILESYSTEM_UTILIZATION_PERCENTAGE
-        value: "10"
-
-      ## we can specify the size in Gigabyte (Gb) also in place of percentage of free space
-      ## NOTE: for selecting this option FILESYSTEM_UTILIZATION_PERCENTAGE should be empty
-      - name: FILESYSTEM_UTILIZATION_BYTES
-        value: ""
-
-      ## Number of core of CPU
-      - name: CPU
-        value: "1"
-
-      ## Total number of workers default value is 4
-      - name: NUMBER_OF_WORKERS
-        value: "4"
-
-      ## Total number of VM workers
-      - name: VM_WORKERS
-        value: "1"
-
-      ## enter the comma separated target nodes name
-      - name: TARGET_NODES
-        value: ""
-
+      # To select pods on specific node(s)
       - name: NODE_LABEL
         value: ""
 
-      # Period to wait before and after injection of chaos in sec
-      - name: RAMP_TIME
-        value: ""
-
-      # provide lib image
-      - name: LIB_IMAGE
-        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
-
-      ## percentage of total nodes to target
-      - name: NODES_AFFECTED_PERC
-        value: ""
-
-      - name: DEFAULT_HEALTH_CHECK
-        value: "false"
-
-      ## it defines the sequence of chaos execution for multiple target nodes
+      ## it defines the sequence of chaos execution for multiple target pods
       ## supported values: serial, parallel
       - name: SEQUENCE
         value: "parallel"
 
     labels:
-      name: node-io-stress
+      name: pod-http-latency
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
+      app.kubernetes.io/runtime-api-usage: "true"
       app.kubernetes.io/version: 3.13.0
 
 ---
@@ -3267,12 +3299,12 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Fillup Ephemeral Storage of a Resource
+    Inject network packet corruption into application pod
 kind: ChaosExperiment
 metadata:
-  name: disk-fill
+  name: pod-network-corruption
   labels:
-    name: disk-fill
+    name: pod-network-corruption
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -3337,56 +3369,66 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name disk-fill
+      - ./experiments -name pod-network-corruption
     command:
       - /bin/bash
     env:
       - name: TARGET_CONTAINER
         value: ""
 
-      - name: FILL_PERCENTAGE
-        value: "80"
+      # provide lib image
+      - name: LIB_IMAGE
+        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
+
+      - name: NETWORK_INTERFACE
+        value: "eth0"
+
+      - name: TC_IMAGE
+        value: "gaiadocker/iproute2"
+
+      - name: NETWORK_PACKET_CORRUPTION_PERCENTAGE
+        value: "100" #in PERCENTAGE
 
       - name: TOTAL_CHAOS_DURATION
-        value: "60"
+        value: "60" # in seconds
 
-      # Period to wait before and after injection of chaos in sec
+      # Time period to wait before and after injection of chaos in sec
       - name: RAMP_TIME
         value: ""
 
-      # provide the data block size
-      # supported unit is KB
-      - name: DATA_BLOCK_SIZE
-        value: "256"
-
-      - name: TARGET_PODS
+      ## percentage of total pods to target
+      - name: PODS_AFFECTED_PERC
         value: ""
 
-      - name: EPHEMERAL_STORAGE_MEBIBYTES
+      - name: TARGET_PODS
         value: ""
 
       # To select pods on specific node(s)
       - name: NODE_LABEL
         value: ""
 
-      ## percentage of total pods to target
-      - name: PODS_AFFECTED_PERC
-        value: ""
+      # provide the name of container runtime
+      # for litmus LIB, it supports docker, containerd, crio
+      # for pumba LIB, it supports docker only
+      - name: CONTAINER_RUNTIME
+        value: "containerd"
 
-      - name: DEFAULT_HEALTH_CHECK
-        value: "false"
+      # provide the destination ips
+      # chaos injection will be triggered for these destination ips
+      - name: DESTINATION_IPS
+        value: ""
 
-      - name: LIB_IMAGE
-        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
+      # provide the destination hosts
+      # chaos injection will be triggered for these destination hosts
+      - name: DESTINATION_HOSTS
+        value: ""
 
       # provide the socket file path
       - name: SOCKET_PATH
         value: "/run/containerd/containerd.sock"
 
-      # provide the name of container runtime
-      # it supports docker, containerd, crio
-      - name: CONTAINER_RUNTIME
-        value: "containerd"
+      - name: DEFAULT_HEALTH_CHECK
+        value: "false"
 
       ## it defines the sequence of chaos execution for multiple target pods
       ## supported values: serial, parallel
@@ -3394,10 +3436,10 @@ spec:
         value: "parallel"
 
     labels:
-      name: disk-fill
+      name: pod-network-corruption
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
-      app.kubernetes.io/host-path-usage: "true"
+      app.kubernetes.io/runtime-api-usage: "true"
       app.kubernetes.io/version: 3.13.0
 
 ---
@@ -3405,18 +3447,18 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    it injects chaos into the pod which stops outgoing HTTP requests by resetting the TCP connection and then reverts back to the original state after a specified duration
+    Restart node
 kind: ChaosExperiment
 metadata:
-  name: pod-http-reset-peer
+  name: node-restart
   labels:
-    name: pod-http-reset-peer
+    name: node-restart
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
 spec:
   definition:
-    scope: Namespaced
+    scope: Cluster
     permissions:
       # Create and monitor the experiment & helper pods
       - apiGroups: [""]
@@ -3435,9 +3477,9 @@ spec:
       - apiGroups: [""]
         resources: ["events"]
         verbs: ["create", "get", "list", "patch", "update"]
-      # Fetch configmaps details and mount it to the experiment pod (if specified)
+      # Fetch configmaps & secrets details and mount it to the experiment pod (if specified)
       - apiGroups: [""]
-        resources: ["configmaps"]
+        resources: ["configmaps", "secrets"]
         verbs: ["get", "list"]
       # Track and get the runner, experiment, and helper pods log
       - apiGroups: [""]
@@ -3447,22 +3489,6 @@ spec:
       - apiGroups: [""]
         resources: ["pods/exec"]
         verbs: ["get", "list", "create"]
-      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
-      - apiGroups: ["apps"]
-        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
-        verbs: ["list", "get"]
-      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
-      - apiGroups: ["apps.openshift.io"]
-        resources: ["deploymentconfigs"]
-        verbs: ["list", "get"]
-      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
-      - apiGroups: [""]
-        resources: ["replicationcontrollers"]
-        verbs: ["get", "list"]
-      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
-      - apiGroups: ["argoproj.io"]
-        resources: ["rollouts"]
-        verbs: ["list", "get"]
       # for configuring and monitor the experiment job by the chaos-runner pod
       - apiGroups: ["batch"]
         resources: ["jobs"]
@@ -3471,96 +3497,68 @@ spec:
       - apiGroups: ["litmuschaos.io"]
         resources: ["chaosengines", "chaosexperiments", "chaosresults"]
         verbs: ["create", "list", "get", "patch", "update", "delete"]
+      # for experiment to perform node status checks
+      - apiGroups: [""]
+        resources: ["nodes"]
+        verbs: ["get", "list"]
     image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name pod-http-reset-peer
+      - ./experiments -name node-restart
     command:
       - /bin/bash
     env:
-      - name: TARGET_CONTAINER
-        value: ""
-
-      # provide lib image
-      - name: LIB_IMAGE
-        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
-
-      # reset timeout specifies after how much duration to reset the connection
-      - name: RESET_TIMEOUT
-        value: "0" #in ms
-
-      # port of the target service
-      - name: TARGET_SERVICE_PORT
-        value: "80"
-
-      # toxicity is the probability of the request to be affected
-      # provide the percentage value in the range of 0-100
-      # 0 means no request will be affected and 100 means all request will be affected
-      - name: TOXICITY
-        value: "100"
-
-      # port on which the proxy will listen
-      - name: PROXY_PORT
-        value: "20000"
-
-      # network interface on which the proxy will listen
-      - name: NETWORK_INTERFACE
-        value: "eth0"
+      - name: SSH_USER
+        value: "root"
+      - name: REBOOT_COMMAND
+        value: "sudo systemctl reboot; true"
 
       - name: TOTAL_CHAOS_DURATION
-        value: "60" # in seconds
+        value: "60"
 
-      # Time period to wait before and after injection of chaos in sec
+      # Period to wait before and after injection of chaos in sec
       - name: RAMP_TIME
         value: ""
 
-      - name: DEFAULT_HEALTH_CHECK
-        value: "false"
+      # provide lib image
+      - name: LIB_IMAGE
+        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
 
-      # percentage of total pods to target
-      - name: PODS_AFFECTED_PERC
+      # ENTER THE TARGET NODE NAME
+      - name: TARGET_NODE
         value: ""
 
-      - name: TARGET_PODS
+      - name: NODE_LABEL
         value: ""
 
-      # provide the name of container runtime
-      # for litmus LIB, it supports docker, containerd, crio
-      # for pumba LIB, it supports docker only
-      - name: CONTAINER_RUNTIME
-        value: "containerd"
-
-      # provide the socket file path
-      - name: SOCKET_PATH
-        value: "/run/containerd/containerd.sock"
-
-      # To select pods on specific node(s)
-      - name: NODE_LABEL
+      # ENTER THE TARGET NODE IP
+      - name: TARGET_NODE_IP
         value: ""
 
-      ## it defines the sequence of chaos execution for multiple target pods
-      ## supported values: serial, parallel
-      - name: SEQUENCE
-        value: "parallel"
+      - name: DEFAULT_HEALTH_CHECK
+        value: "false"
 
     labels:
-      name: pod-http-reset-peer
+      name: node-restart
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
+    secrets:
+      - name: id-rsa
+        mountPath: /mnt/
 
 ---
 ---
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    It injects the chaos inside the pod which modifies the body of the response from the provided application server to the body string provided by the user and reverts after a specified duration
+    It injects the chaos inside the pod which modifies the header of the request/response from the provided application server to the headers provided by the user and reverts after a specified duration
 kind: ChaosExperiment
 metadata:
-  name: pod-http-modify-body
+  name: pod-http-modify-header
   labels:
-    name: pod-http-modify-body
+    name: pod-http-modify-header
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -3625,7 +3623,7 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name pod-http-modify-body
+      - ./experiments -name pod-http-modify-header
     command:
       - /bin/bash
     env:
@@ -3636,20 +3634,14 @@ spec:
       - name: LIB_IMAGE
         value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
 
-      # provide the body string to overwrite the response body
-      # if no value is provided, response will be an empty body.
-      - name: RESPONSE_BODY
-        value: ""
-
-      # provide the encoding type for the response body
-      # currently supported value are gzip, deflate
-      # if empty no encoding will be applied
-      - name: CONTENT_ENCODING
-        value: ""
+      # map of headers to modify/add; Eg: {"X-Litmus-Test-Header": "X-Litmus-Test-Value"}
+      # to remove a header, just set the value to ""; Eg: {"X-Litmus-Test-Header": ""}
+      - name: HEADERS_MAP
+        value: "{}"
 
-      # provide the content type for the response body
-      - name: CONTENT_TYPE
-        value: "text/plain"
+      # whether to modify response headers or request headers. Accepted values: request, response
+      - name: HEADER_MODE
+        value: "response"
 
       # port of the target service
       - name: TARGET_SERVICE_PORT
@@ -3706,9 +3698,10 @@ spec:
         value: "parallel"
 
     labels:
-      name: pod-http-modify-body
+      name: pod-http-modify-header
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
+      app.kubernetes.io/runtime-api-usage: "true"
       app.kubernetes.io/version: 3.13.0
 
 ---
@@ -3716,18 +3709,18 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    Injects CPU consumption on pods belonging to an app deployment
+    Give IO disk stress on a node belonging to a deployment
 kind: ChaosExperiment
 metadata:
-  name: pod-cpu-hog-exec
+  name: node-io-stress
   labels:
-    name: pod-cpu-hog-exec
+    name: node-io-stress
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
 spec:
   definition:
-    scope: Namespaced
+    scope: Cluster
     permissions:
       # Create and monitor the experiment & helper pods
       - apiGroups: [""]
@@ -3758,22 +3751,6 @@ spec:
       - apiGroups: [""]
         resources: ["pods/exec"]
         verbs: ["get", "list", "create"]
-      # deriving the parent/owner details of the pod(if parent is anyof {deployment, statefulset, daemonsets})
-      - apiGroups: ["apps"]
-        resources: ["deployments", "statefulsets", "replicasets", "daemonsets"]
-        verbs: ["list", "get"]
-      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
-      - apiGroups: ["apps.openshift.io"]
-        resources: ["deploymentconfigs"]
-        verbs: ["list", "get"]
-      # deriving the parent/owner details of the pod(if parent is deploymentConfig)
-      - apiGroups: [""]
-        resources: ["replicationcontrollers"]
-        verbs: ["get", "list"]
-      # deriving the parent/owner details of the pod(if parent is argo-rollouts)
-      - apiGroups: ["argoproj.io"]
-        resources: ["rollouts"]
-        verbs: ["list", "get"]
       # for configuring and monitor the experiment job by the chaos-runner pod
       - apiGroups: ["batch"]
         resources: ["jobs"]
@@ -3782,49 +3759,72 @@ spec:
       - apiGroups: ["litmuschaos.io"]
         resources: ["chaosengines", "chaosexperiments", "chaosresults"]
         verbs: ["create", "list", "get", "patch", "update", "delete"]
+      # for experiment to perform node status checks
+      - apiGroups: [""]
+        resources: ["nodes"]
+        verbs: ["get", "list"]
     image: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name pod-cpu-hog-exec
+      - ./experiments -name node-io-stress
     command:
       - /bin/bash
     env:
       - name: TOTAL_CHAOS_DURATION
-        value: "60"
+        value: "120"
 
-      ## Number of CPU cores to stress
-      - name: CPU_CORES
-        value: "1"
+      ## specify the size as percentage of free space on the file system
+      ## default value 90 (in percentage)
+      - name: FILESYSTEM_UTILIZATION_PERCENTAGE
+        value: "10"
 
-      ## Percentage of total pods to target
-      - name: PODS_AFFECTED_PERC
+      ## we can specify the size in Gigabyte (Gb) also in place of percentage of free space
+      ## NOTE: for selecting this option FILESYSTEM_UTILIZATION_PERCENTAGE should be empty
+      - name: FILESYSTEM_UTILIZATION_BYTES
         value: ""
 
-      ## Period to wait before and after injection of chaos in sec
-      - name: RAMP_TIME
+      ## Number of core of CPU
+      - name: CPU
+        value: "1"
+
+      ## Total number of workers default value is 4
+      - name: NUMBER_OF_WORKERS
+        value: "4"
+
+      ## Total number of VM workers
+      - name: VM_WORKERS
+        value: "1"
+
+      ## enter the comma separated target nodes name
+      - name: TARGET_NODES
         value: ""
 
-      # The command to kill the chaos process
-      - name: CHAOS_KILL_COMMAND
-        value: "kill $(find /proc -name exe -lname '*/md5sum' 2>&1 | grep -v 'Permission denied' | awk -F/ '{print $(NF-1)}')"
+      - name: NODE_LABEL
+        value: ""
 
-      - name: TARGET_CONTAINER
+      # Period to wait before and after injection of chaos in sec
+      - name: RAMP_TIME
         value: ""
 
-      - name: TARGET_PODS
+      # provide lib image
+      - name: LIB_IMAGE
+        value: "litmuschaos.docker.scarf.sh/litmuschaos/go-runner:3.13.0"
+
+      ## percentage of total nodes to target
+      - name: NODES_AFFECTED_PERC
         value: ""
 
       - name: DEFAULT_HEALTH_CHECK
         value: "false"
 
-      ## it defines the sequence of chaos execution for multiple target pods
+      ## it defines the sequence of chaos execution for multiple target nodes
       ## supported values: serial, parallel
       - name: SEQUENCE
         value: "parallel"
 
     labels:
-      name: pod-cpu-hog-exec
+      name: node-io-stress
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
diff --git a/faults/spring-boot/experiments.yaml b/faults/spring-boot/experiments.yaml
index a8b8fb7f1..47dc3b281 100644
--- a/faults/spring-boot/experiments.yaml
+++ b/faults/spring-boot/experiments.yaml
@@ -108,9 +108,9 @@ description:
     It can target random pods with a Spring Boot application and allows configuring the assaults to inject cpu-stress
 kind: ChaosExperiment
 metadata:
-  name: spring-boot-faults
+  name: spring-boot-cpu-stress
   labels:
-    name: spring-boot-faults
+    name: spring-boot-cpu-stress
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -155,57 +155,15 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name spring-boot-faults
+      - ./experiments -name spring-boot-cpu-stress
     command:
       - /bin/bash
     env:
-      # it enables spring app-kill fault
-      - name: CM_KILL_APPLICATION_ACTIVE
-        value: ""
-
-      # it enables spring-boot latency fault
-      - name: CM_LATENCY_ACTIVE
-        value: ""
-
-      # provide the latency (ms)
-      # it is applicable when latency is active
-      - name: LATENCY
-        value: "2000"
-
-      # it enables spring-boot memory stress fault
-      - name: CM_MEMORY_ACTIVE
-        value: ""
-
-      # it contains fraction of memory to be stressed(0.70 equals 70%)
-      # it supports value in range [0.01,0.95]
-      # it is applicable when memory is active
-      - name: MEMORY_FILL_FRACTION
-        value: "0.70"
-
-      # it enables spring-boot cpu stress fault
-      - name: CM_CPU_ACTIVE
-        value: ""
-
       # it contains fraction of cpu to be stressed(0.95 equals 95%)
       # it supports value in range [0.1,1.0]
-      # it is applicable when cpu is active
       - name: CPU_LOAD_FRACTION
         value: "0.9"
 
-      # it enables spring-boot exceptions fault
-      - name: CM_EXCEPTIONS_ACTIVE
-        value: ""
-
-      # Type of raised exception
-      # it is applicable when exceptions is active
-      - name: CM_EXCEPTIONS_TYPE
-        value: "java.lang.IllegalArgumentException"
-
-        # Argument of raised exception
-        # it is applicable when exceptions is active
-      - name: CM_EXCEPTIONS_ARGUMENTS
-        value: "java.lang.String:custom illegal argument exception"
-
       # port of the spring boot application
       - name: CM_PORT
         value: ""
@@ -241,7 +199,7 @@ spec:
         value: "parallel"
 
     labels:
-      name: spring-boot-faults
+      name: spring-boot-cpu-stress
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
@@ -251,12 +209,12 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    It can target random pods with a Spring Boot application and allows configuring the assaults to inject cpu-stress
+    It can target random pods with a Spring Boot application and allows configuring the assaults to inject memory-stress
 kind: ChaosExperiment
 metadata:
-  name: spring-boot-cpu-stress
+  name: spring-boot-memory-stress
   labels:
-    name: spring-boot-cpu-stress
+    name: spring-boot-memory-stress
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -301,25 +259,25 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name spring-boot-cpu-stress
+      - ./experiments -name spring-boot-memory-stress
     command:
       - /bin/bash
     env:
-      # it contains fraction of cpu to be stressed(0.95 equals 95%)
-      # it supports value in range [0.1,1.0]
-      - name: CPU_LOAD_FRACTION
-        value: "0.9"
+      # it contains fraction of memory to be stressed(0.70 equals 70%)
+      # it supports value in range [0.01,0.95]
+      - name: MEMORY_FILL_FRACTION
+        value: "0.70"
 
-      # port of the spring boot application
+      # port of the spring boot application
       - name: CM_PORT
         value: ""
 
-      #it contains number of requests are to be attacked
-      # n value means nth request will be affected
+      # it contains number of requests are to be attacked
+      # n value means nth request will be affected
       - name: CM_LEVEL
         value: "1"
 
-      # it limits watched packages/classes/methods
+      # it limits watched packages/classes/methods
       - name: CM_WATCHED_CUSTOM_SERVICES
         value: ""
 
@@ -345,7 +303,7 @@ spec:
         value: "parallel"
 
     labels:
-      name: spring-boot-cpu-stress
+      name: spring-boot-memory-stress
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
@@ -355,12 +313,12 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    It can target random pods with a Spring Boot application and allows configuring the assaults to inject network latency
+    It can target random pods with a Spring Boot application and allows configuring the assaults to inject multiple Spring Boot faults
 kind: ChaosExperiment
 metadata:
-  name: spring-boot-exceptions
+  name: spring-boot-faults
   labels:
-    name: spring-boot-exceptions
+    name: spring-boot-faults
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -405,15 +363,54 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name spring-boot-exceptions
+      - ./experiments -name spring-boot-faults
     command:
       - /bin/bash
     env:
+      # it enables spring app-kill fault
+      - name: CM_KILL_APPLICATION_ACTIVE
+        value: ""
+
+      # it enables spring-boot latency fault
+      - name: CM_LATENCY_ACTIVE
+        value: ""
+
+      # provide the latency (ms)
+      # it is applicable when latency is active
+      - name: LATENCY
+        value: "2000"
+
+      # it enables spring-boot memory stress fault
+      - name: CM_MEMORY_ACTIVE
+        value: ""
+
+      # it contains fraction of memory to be stressed(0.70 equals 70%)
+      # it supports value in range [0.01,0.95]
+      # it is applicable when memory is active
+      - name: MEMORY_FILL_FRACTION
+        value: "0.70"
+
+      # it enables spring-boot cpu stress fault
+      - name: CM_CPU_ACTIVE
+        value: ""
+
+      # it contains fraction of cpu to be stressed(0.95 equals 95%)
+      # it supports value in range [0.1,1.0]
+      # it is applicable when cpu is active
+      - name: CPU_LOAD_FRACTION
+        value: "0.9"
+
+      # it enables spring-boot exceptions fault
+      - name: CM_EXCEPTIONS_ACTIVE
+        value: ""
+
       # Type of raised exception
+      # it is applicable when exceptions is active
       - name: CM_EXCEPTIONS_TYPE
         value: "java.lang.IllegalArgumentException"
 
         # Argument of raised exception
+        # it is applicable when exceptions is active
       - name: CM_EXCEPTIONS_ARGUMENTS
         value: "java.lang.String:custom illegal argument exception"
 
@@ -452,7 +449,7 @@ spec:
         value: "parallel"
 
     labels:
-      name: spring-boot-exceptions
+      name: spring-boot-faults
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
@@ -462,12 +459,12 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    It can target random pods with a Spring Boot application and allows configuring the assaults to inject app-kill
+    It can target random pods with a Spring Boot application and allows configuring the assaults to inject exceptions
 kind: ChaosExperiment
 metadata:
-  name: spring-boot-app-kill
+  name: spring-boot-exceptions
   labels:
-    name: spring-boot-app-kill
+    name: spring-boot-exceptions
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -512,10 +509,18 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name spring-boot-app-kill
+      - ./experiments -name spring-boot-exceptions
     command:
       - /bin/bash
     env:
+      # Type of raised exception
+      - name: CM_EXCEPTIONS_TYPE
+        value: "java.lang.IllegalArgumentException"
+
+      # Argument of raised exception
+      - name: CM_EXCEPTIONS_ARGUMENTS
+        value: "java.lang.String:custom illegal argument exception"
+
       # port of the spring boot application
       - name: CM_PORT
         value: ""
@@ -534,6 +539,9 @@ spec:
       - name: CM_WATCHERS
         value: "restController"
 
+      - name: TOTAL_CHAOS_DURATION
+        value: "30"
+
       ## percentage of total pods to target
       - name: PODS_AFFECTED_PERC
         value: ""
@@ -548,7 +556,7 @@ spec:
         value: "parallel"
 
     labels:
-      name: spring-boot-app-kill
+      name: spring-boot-exceptions
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0
@@ -558,12 +566,12 @@ spec:
 apiVersion: litmuschaos.io/v1alpha1
 description:
   message: |
-    It can target random pods with a Spring Boot application and allows configuring the assaults to inject memory-stress
+    It can target random pods with a Spring Boot application and allows configuring the assaults to inject app-kill
 kind: ChaosExperiment
 metadata:
-  name: spring-boot-memory-stress
+  name: spring-boot-app-kill
   labels:
-    name: spring-boot-memory-stress
+    name: spring-boot-app-kill
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 3.13.0
@@ -608,25 +616,20 @@ spec:
     imagePullPolicy: Always
     args:
       - -c
-      - ./experiments -name spring-boot-memory-stress
+      - ./experiments -name spring-boot-app-kill
     command:
       - /bin/bash
     env:
-      # it contains fraction of memory to be stressed(0.70 equals 70%)
-      # it supports value in range [0.01,0.95]
-      - name: MEMORY_FILL_FRACTION
-        value: "0.70"
-
-        # port of the spring boot application
+      # port of the spring boot application
       - name: CM_PORT
         value: ""
 
-        # it contains number of requests are to be attacked
-        # n value means nth request will be affected
+      # it contains number of requests are to be attacked
+      # n value means nth request will be affected
       - name: CM_LEVEL
         value: "1"
 
-        # it limits watched packages/classes/methods
+      # it limits watched packages/classes/methods
       - name: CM_WATCHED_CUSTOM_SERVICES
         value: ""
 
@@ -635,9 +638,6 @@ spec:
       - name: CM_WATCHERS
         value: "restController"
 
-      - name: TOTAL_CHAOS_DURATION
-        value: "30"
-
       ## percentage of total pods to target
       - name: PODS_AFFECTED_PERC
         value: ""
@@ -652,7 +652,7 @@ spec:
         value: "parallel"
 
     labels:
-      name: spring-boot-memory-stress
+      name: spring-boot-app-kill
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 3.13.0