diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3eb07453b..db0b66c9f 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -12,19 +12,12 @@ jobs: # Install golang - uses: actions/setup-go@v2 with: - go-version: 1.18 + go-version: '1.20' - uses: actions/checkout@v2 with: ref: ${{ github.event.pull_request.head.sha }} - #TODO: Add Dockerfile linting - # Running go-lint - - name: Checking Go-Lint - run : | - sudo apt-get update && sudo apt-get install golint - make gotasks - - name: gofmt check run: | if [ "$(gofmt -s -l . | wc -l)" -ne 0 ] @@ -33,9 +26,21 @@ jobs: gofmt -s -l . exit 1 fi - + - name: golangci-lint - uses: reviewdog/action-golangci-lint@v1 + uses: reviewdog/action-golangci-lint@v1 + + gitleaks-scan: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + - name: Run GitLeaks + run: | + wget https://github.com/gitleaks/gitleaks/releases/download/v8.18.2/gitleaks_8.18.2_linux_x64.tar.gz && \ + tar -zxvf gitleaks_8.18.2_linux_x64.tar.gz && \ + sudo mv gitleaks /usr/local/bin && gitleaks detect --source . -v build: needs: pre-checks @@ -44,7 +49,7 @@ jobs: # Install golang - uses: actions/setup-go@v2 with: - go-version: 1.18 + go-version: '1.20' - uses: actions/checkout@v2 with: @@ -68,6 +73,7 @@ jobs: file: build/Dockerfile platforms: linux/amd64,linux/arm64 tags: litmuschaos/go-runner:ci + build-args: LITMUS_VERSION=3.10.0 trivy: needs: pre-checks @@ -79,8 +85,8 @@ jobs: - name: Build an image from Dockerfile run: | - docker build -f build/Dockerfile -t docker.io/litmuschaos/go-runner:${{ github.sha }} . --build-arg TARGETARCH=amd64 - + docker build -f build/Dockerfile -t docker.io/litmuschaos/go-runner:${{ github.sha }} . --build-arg TARGETARCH=amd64 --build-arg LITMUS_VERSION=3.10.0 + - name: Run Trivy vulnerability scanner uses: aquasecurity/trivy-action@master with: @@ -89,4 +95,4 @@ jobs: exit-code: '1' ignore-unfixed: true vuln-type: 'os,library' - severity: 'CRITICAL,HIGH' + severity: 'CRITICAL,HIGH' \ No newline at end of file diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index e093af663..68fbcdafc 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -13,16 +13,9 @@ jobs: # Install golang - uses: actions/setup-go@v2 with: - go-version: 1.18 + go-version: '1.20' - uses: actions/checkout@v2 - #TODO: Add Dockerfile linting - # Running go-lint - - name: Checking Go-Lint - run : | - sudo apt-get update && sudo apt-get install golint - make gotasks - - name: gofmt check run: | if [ "$(gofmt -s -l . | wc -l)" -ne 0 ] @@ -31,9 +24,9 @@ jobs: gofmt -s -l . 
exit 1 fi - + - name: golangci-lint - uses: reviewdog/action-golangci-lint@v1 + uses: reviewdog/action-golangci-lint@v1 push: needs: pre-checks @@ -43,7 +36,7 @@ jobs: # Install golang - uses: actions/setup-go@v2 with: - go-version: 1.18 + go-version: '1.20' - uses: actions/checkout@v2 - name: Set up QEMU @@ -70,3 +63,4 @@ jobs: file: build/Dockerfile platforms: linux/amd64,linux/arm64 tags: litmuschaos/go-runner:ci + build-args: LITMUS_VERSION=3.10.0 \ No newline at end of file diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index fcdf080ce..8a633e7a6 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -12,15 +12,9 @@ jobs: # Install golang - uses: actions/setup-go@v2 with: - go-version: 1.18 + go-version: '1.20' - uses: actions/checkout@v2 - #TODO: Add Dockerfile linting - # Running go-lint - - name: Checking Go-Lint - run : | - sudo apt-get update && sudo apt-get install golint - make gotasks push: needs: pre-checks runs-on: ubuntu-latest @@ -28,7 +22,7 @@ jobs: # Install golang - uses: actions/setup-go@v2 with: - go-version: 1.18 + go-version: '1.20' - uses: actions/checkout@v2 - name: Set Tag @@ -68,3 +62,4 @@ jobs: file: build/Dockerfile platforms: linux/amd64,linux/arm64 tags: litmuschaos/go-runner:${{ env.RELEASE_TAG }},litmuschaos/go-runner:latest + build-args: LITMUS_VERSION=3.10.0 \ No newline at end of file diff --git a/.github/workflows/run-e2e-on-pr-commits.yml b/.github/workflows/run-e2e-on-pr-commits.yml index bfe84f9e3..333c02922 100644 --- a/.github/workflows/run-e2e-on-pr-commits.yml +++ b/.github/workflows/run-e2e-on-pr-commits.yml @@ -9,215 +9,15 @@ on: - '**.yaml' jobs: - # Helm_Install_Generic_Tests: - # runs-on: ubuntu-18.04 - # steps: - - # - uses: actions/checkout@v2 - # with: - # ref: ${{ github.event.pull_request.head.sha }} - - # - name: Generate go binary and build docker image - # run: make build-amd64 - - # #Install and configure a kind cluster - # - name: Installing KinD cluster for the test - # uses: engineerd/setup-kind@v0.5.0 - # with: - # version: "v0.7.0" - # config: "build/kind-cluster/kind-config.yaml" - - # - name: Configuring and testing the Installation - # run: | - # kubectl taint nodes kind-control-plane node-role.kubernetes.io/master- - # kind get kubeconfig --internal >$HOME/.kube/config - # kubectl cluster-info --context kind-kind - # kubectl get nodes - - # - name: Load docker image - # run: /usr/local/bin/kind load docker-image litmuschaos/go-runner:ci - - # - name: Deploy a sample application for chaos injection - # run: | - # kubectl apply -f https://raw.githubusercontent.com/litmuschaos/chaos-ci-lib/master/app/nginx.yml - # kubectl wait --for=condition=Ready pods --all --namespace default --timeout=90s - - # - name: Setting up kubeconfig ENV for Github Chaos Action - # run: echo ::set-env name=KUBE_CONFIG_DATA::$(base64 -w 0 ~/.kube/config) - # env: - # ACTIONS_ALLOW_UNSECURE_COMMANDS: true - - # - name: Setup Litmus - # uses: litmuschaos/github-chaos-actions@master - # env: - # INSTALL_LITMUS: true - - # - name: Running Litmus pod delete chaos experiment - # if: always() - # uses: litmuschaos/github-chaos-actions@master - # env: - # EXPERIMENT_NAME: pod-delete - # EXPERIMENT_IMAGE: litmuschaos/go-runner - # EXPERIMENT_IMAGE_TAG: ci - # IMAGE_PULL_POLICY: IfNotPresent - # JOB_CLEANUP_POLICY: delete - - # - name: Running container kill chaos experiment - # if: always() - # uses: litmuschaos/github-chaos-actions@master - # env: - # EXPERIMENT_NAME: container-kill - # 
EXPERIMENT_IMAGE: litmuschaos/go-runner - # EXPERIMENT_IMAGE_TAG: ci - # IMAGE_PULL_POLICY: IfNotPresent - # JOB_CLEANUP_POLICY: delete - # CONTAINER_RUNTIME: containerd - - # - name: Running node-cpu-hog chaos experiment - # if: always() - # uses: litmuschaos/github-chaos-actions@master - # env: - # EXPERIMENT_NAME: node-cpu-hog - # EXPERIMENT_IMAGE: litmuschaos/go-runner - # EXPERIMENT_IMAGE_TAG: ci - # IMAGE_PULL_POLICY: IfNotPresent - # JOB_CLEANUP_POLICY: delete - - - # - name: Running node-memory-hog chaos experiment - # if: always() - # uses: litmuschaos/github-chaos-actions@master - # env: - # EXPERIMENT_NAME: node-memory-hog - # EXPERIMENT_IMAGE: litmuschaos/go-runner - # EXPERIMENT_IMAGE_TAG: ci - # IMAGE_PULL_POLICY: IfNotPresent - # JOB_CLEANUP_POLICY: delete - - # - name: Running pod-cpu-hog chaos experiment - # if: always() - # uses: litmuschaos/github-chaos-actions@master - # env: - # EXPERIMENT_NAME: pod-cpu-hog - # EXPERIMENT_IMAGE: litmuschaos/go-runner - # EXPERIMENT_IMAGE_TAG: ci - # IMAGE_PULL_POLICY: IfNotPresent - # JOB_CLEANUP_POLICY: delete - # TARGET_CONTAINER: nginx - # TOTAL_CHAOS_DURATION: 60 - # CPU_CORES: 1 - - # - name: Running pod-memory-hog chaos experiment - # if: always() - # uses: litmuschaos/github-chaos-actions@master - # env: - # EXPERIMENT_NAME: pod-memory-hog - # EXPERIMENT_IMAGE: litmuschaos/go-runner - # EXPERIMENT_IMAGE_TAG: ci - # IMAGE_PULL_POLICY: IfNotPresent - # JOB_CLEANUP_POLICY: delete - # TARGET_CONTAINER: nginx - # TOTAL_CHAOS_DURATION: 60 - # MEMORY_CONSUMPTION: 500 - - # - name: Running pod network corruption chaos experiment - # if: always() - # uses: litmuschaos/github-chaos-actions@master - # env: - # EXPERIMENT_NAME: pod-network-corruption - # EXPERIMENT_IMAGE: litmuschaos/go-runner - # EXPERIMENT_IMAGE_TAG: ci - # IMAGE_PULL_POLICY: IfNotPresent - # JOB_CLEANUP_POLICY: delete - # TARGET_CONTAINER: nginx - # TOTAL_CHAOS_DURATION: 60 - # NETWORK_INTERFACE: eth0 - # CONTAINER_RUNTIME: containerd - - # - name: Running pod network duplication chaos experiment - # if: always() - # uses: litmuschaos/github-chaos-actions@master - # env: - # EXPERIMENT_NAME: pod-network-duplication - # EXPERIMENT_IMAGE: litmuschaos/go-runner - # EXPERIMENT_IMAGE_TAG: ci - # IMAGE_PULL_POLICY: IfNotPresent - # JOB_CLEANUP_POLICY: delete - # TARGET_CONTAINER: nginx - # TOTAL_CHAOS_DURATION: 60 - # NETWORK_INTERFACE: eth0 - # CONTAINER_RUNTIME: containerd - - # - name: Running pod-network-latency chaos experiment - # if: always() - # uses: litmuschaos/github-chaos-actions@master - # env: - # EXPERIMENT_NAME: pod-network-latency - # EXPERIMENT_IMAGE: litmuschaos/go-runner - # EXPERIMENT_IMAGE_TAG: ci - # IMAGE_PULL_POLICY: IfNotPresent - # JOB_CLEANUP_POLICY: delete - # TARGET_CONTAINER: nginx - # TOTAL_CHAOS_DURATION: 60 - # NETWORK_INTERFACE: eth0 - # NETWORK_LATENCY: 60000 - # CONTAINER_RUNTIME: containerd - - # - name: Running pod-network-loss chaos experiment - # if: always() - # uses: litmuschaos/github-chaos-actions@master - # env: - # EXPERIMENT_NAME: pod-network-loss - # EXPERIMENT_IMAGE: litmuschaos/go-runner - # EXPERIMENT_IMAGE_TAG: ci - # IMAGE_PULL_POLICY: IfNotPresent - # JOB_CLEANUP_POLICY: delete - # TARGET_CONTAINER: nginx - # TOTAL_CHAOS_DURATION: 60 - # NETWORK_INTERFACE: eth0 - # NETWORK_PACKET_LOSS_PERCENTAGE: 100 - # CONTAINER_RUNTIME: containerd - - # - name: Running pod autoscaler chaos experiment - # if: always() - # uses: litmuschaos/github-chaos-actions@master - # env: - # EXPERIMENT_NAME: pod-autoscaler - # EXPERIMENT_IMAGE: 
litmuschaos/go-runner - # EXPERIMENT_IMAGE_TAG: ci - # IMAGE_PULL_POLICY: IfNotPresent - # JOB_CLEANUP_POLICY: delete - # TOTAL_CHAOS_DURATION: 60 - - # - name: Running node-io-stress chaos experiment - # if: always() - # uses: litmuschaos/github-chaos-actions@master - # env: - # EXPERIMENT_NAME: node-io-stress - # EXPERIMENT_IMAGE: litmuschaos/go-runner - # EXPERIMENT_IMAGE_TAG: ci - # IMAGE_PULL_POLICY: IfNotPresent - # JOB_CLEANUP_POLICY: delete - # TOTAL_CHAOS_DURATION: 120 - # FILESYSTEM_UTILIZATION_PERCENTAGE: 10 - - # - name: Uninstall Litmus - # uses: litmuschaos/github-chaos-actions@master - # env: - # LITMUS_CLEANUP: true - - # - name: Deleting KinD cluster - # if: always() - # run: kind delete cluster Pod_Level_In_Serial_Mode: runs-on: ubuntu-latest steps: # Install golang - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v5 with: - go-version: 1.18 + go-version: '1.20' - uses: actions/checkout@v2 with: @@ -226,15 +26,28 @@ jobs: - name: Generating Go binary and Building docker image run: | make build-amd64 - #Install and configure a kind cluster - - name: Installing Prerequisites (K3S Cluster) - env: - KUBECONFIG: /etc/rancher/k3s/k3s.yaml + + - name: Install KinD + run: | + curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.22.0/kind-linux-amd64 + chmod +x ./kind + mv ./kind /usr/local/bin/kind + + - name: Create KinD Cluster + run: | + kind create cluster --config build/kind-cluster/kind-config.yaml + + - name: Configuring and testing the Installation run: | - curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=v1.21.11+k3s1 sh -s - --docker --write-kubeconfig-mode 664 + kubectl taint nodes kind-control-plane node-role.kubernetes.io/control-plane- + kubectl cluster-info --context kind-kind kubectl wait node --all --for condition=ready --timeout=90s kubectl get nodes - + + - name: Load image on the nodes of the cluster + run: | + kind load docker-image --name=kind litmuschaos/go-runner:ci + - uses: actions/checkout@v2 with: repository: 'litmuschaos/litmus-e2e' @@ -244,23 +57,24 @@ jobs: env: GO_EXPERIMENT_IMAGE: litmuschaos/go-runner:ci EXPERIMENT_IMAGE_PULL_POLICY: IfNotPresent - KUBECONFIG: /etc/rancher/k3s/k3s.yaml + KUBECONFIG: /home/runner/.kube/config run: | make build-litmus make app-deploy make pod-affected-perc-ton-series - - name: Deleting K3S cluster + + - name: Deleting KinD cluster if: always() - run: /usr/local/bin/k3s-uninstall.sh + run: kind delete cluster Pod_Level_In_Parallel_Mode: runs-on: ubuntu-latest steps: # Install golang - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v5 with: - go-version: 1.18 + go-version: '1.20' - uses: actions/checkout@v2 with: @@ -269,14 +83,30 @@ jobs: - name: Generating Go binary and Building docker image run: | make build-amd64 - #Install and configure a kind cluster - - name: Installing Prerequisites (K3S Cluster) - env: - KUBECONFIG: /etc/rancher/k3s/k3s.yaml + + - name: Install KinD + run: | + curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.22.0/kind-linux-amd64 + chmod +x ./kind + mv ./kind /usr/local/bin/kind + + - name: Create KinD Cluster + run: | + kind create cluster --config build/kind-cluster/kind-config.yaml + + - name: Configuring and testing the Installation + env: + KUBECONFIG: /home/runner/.kube/config run: | - curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=v1.21.11+k3s1 sh -s - --docker --write-kubeconfig-mode 664 + kubectl taint nodes kind-control-plane node-role.kubernetes.io/control-plane- + kubectl cluster-info --context kind-kind kubectl wait node --all --for condition=ready --timeout=90s 
kubectl get nodes + + - name: Load image on the nodes of the cluster + run: | + kind load docker-image --name=kind litmuschaos/go-runner:ci + - uses: actions/checkout@v2 with: repository: 'litmuschaos/litmus-e2e' @@ -286,23 +116,24 @@ jobs: env: GO_EXPERIMENT_IMAGE: litmuschaos/go-runner:ci EXPERIMENT_IMAGE_PULL_POLICY: IfNotPresent - KUBECONFIG: /etc/rancher/k3s/k3s.yaml + KUBECONFIG: /home/runner/.kube/config run: | make build-litmus make app-deploy make pod-affected-perc-ton-parallel - - name: Deleting K3S cluster + + - name: Deleting KinD cluster if: always() - run: /usr/local/bin/k3s-uninstall.sh + run: kind delete cluster Node_Level_Tests: runs-on: ubuntu-latest steps: # Install golang - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v5 with: - go-version: 1.18 + go-version: '1.20' - uses: actions/checkout@v2 with: @@ -312,8 +143,15 @@ jobs: run: | make build-amd64 + - name: Install KinD + run: | + curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.22.0/kind-linux-amd64 + chmod +x ./kind + mv ./kind /usr/local/bin/kind + - name: Create KinD Cluster - run: kind create cluster --config build/kind-cluster/kind-config.yaml + run: | + kind create cluster --config build/kind-cluster/kind-config.yaml - name: Configuring and testing the Installation run: | @@ -324,7 +162,7 @@ jobs: - name: Load image on the nodes of the cluster run: | - kind load docker-image --name=kind litmuschaos/go-runner:ci + kind load docker-image --name=kind litmuschaos/go-runner:ci - uses: actions/checkout@v2 with: @@ -355,4 +193,6 @@ jobs: - name: Deleting KinD cluster if: always() - run: kind delete cluster + run: | + kubectl get nodes + kind delete cluster diff --git a/.github/workflows/security-scan.yml b/.github/workflows/security-scan.yml index 613d6ff1b..85584a749 100644 --- a/.github/workflows/security-scan.yml +++ b/.github/workflows/security-scan.yml @@ -4,14 +4,6 @@ on: workflow_dispatch: jobs: - snyk: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@master - - name: Run Snyk to check for vulnerabilities - uses: snyk/actions/golang@master - env: - SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} trivy: runs-on: ubuntu-latest @@ -22,7 +14,7 @@ jobs: - name: Build an image from Dockerfile run: | - docker build -f build/Dockerfile -t docker.io/litmuschaos/go-runner:${{ github.sha }} . --build-arg TARGETARCH=amd64 + docker build -f build/Dockerfile -t docker.io/litmuschaos/go-runner:${{ github.sha }} . --build-arg TARGETARCH=amd64 --build-arg LITMUS_VERSION=3.9.0 - name: Run Trivy vulnerability scanner uses: aquasecurity/trivy-action@master diff --git a/Makefile b/Makefile index 3b188e3ce..82c852d13 100644 --- a/Makefile +++ b/Makefile @@ -9,9 +9,9 @@ IS_DOCKER_INSTALLED = $(shell which docker >> /dev/null 2>&1; echo $$?) # Docker info DOCKER_REGISTRY ?= docker.io -DOCKER_REPO ?= shubh214 +DOCKER_REPO ?= litmuschaos DOCKER_IMAGE ?= go-runner -DOCKER_TAG ?= fix-sudo +DOCKER_TAG ?= ci .PHONY: help help: @@ -72,7 +72,7 @@ image-push: @echo "--> Push go-runner image" @echo "------------------------" @echo "Pushing $(DOCKER_REPO)/$(DOCKER_IMAGE):$(DOCKER_TAG)" - @docker buildx build . --push --file build/Dockerfile --progress plane --platform linux/arm64,linux/amd64 --no-cache --tag $(DOCKER_REGISTRY)/$(DOCKER_REPO)/$(DOCKER_IMAGE):$(DOCKER_TAG) + @docker buildx build . 
--push --file build/Dockerfile --progress plain --platform linux/arm64,linux/amd64 --no-cache --tag $(DOCKER_REGISTRY)/$(DOCKER_REPO)/$(DOCKER_IMAGE):$(DOCKER_TAG) .PHONY: build-amd64 @@ -80,7 +80,7 @@ build-amd64: @echo "-------------------------" @echo "--> Build go-runner image" @echo "-------------------------" - @sudo docker build --file build/Dockerfile --tag $(DOCKER_REGISTRY)/$(DOCKER_REPO)/$(DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETARCH=amd64 + @sudo docker build --file build/Dockerfile --tag $(DOCKER_REGISTRY)/$(DOCKER_REPO)/$(DOCKER_IMAGE):$(DOCKER_TAG) . --build-arg TARGETARCH=amd64 --build-arg LITMUS_VERSION=3.9.0 .PHONY: push-amd64 push-amd64: diff --git a/bin/experiment/experiment.go b/bin/experiment/experiment.go index b99941dd3..ef01e1f2f 100755 --- a/bin/experiment/experiment.go +++ b/bin/experiment/experiment.go @@ -1,7 +1,11 @@ package main import ( + "context" + "errors" "flag" + "os" + // Uncomment to load all auth plugins // _ "k8s.io/client-go/plugin/pkg/client/auth" @@ -56,12 +60,14 @@ import ( ebsLossByTag "github.com/litmuschaos/litmus-go/experiments/kube-aws/ebs-loss-by-tag/experiment" ec2TerminateByID "github.com/litmuschaos/litmus-go/experiments/kube-aws/ec2-terminate-by-id/experiment" ec2TerminateByTag "github.com/litmuschaos/litmus-go/experiments/kube-aws/ec2-terminate-by-tag/experiment" + k6Loadgen "github.com/litmuschaos/litmus-go/experiments/load/k6-loadgen/experiment" springBootFaults "github.com/litmuschaos/litmus-go/experiments/spring-boot/spring-boot-faults/experiment" vmpoweroff "github.com/litmuschaos/litmus-go/experiments/vmware/vm-poweroff/experiment" - - "github.com/litmuschaos/litmus-go/pkg/clients" + cli "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/log" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel" ) func init() { @@ -74,8 +80,25 @@ func init() { } func main() { + initCtx := context.Background() + + // Set up Observability. 
+ if otelExporterEndpoint := os.Getenv(telemetry.OTELExporterOTLPEndpoint); otelExporterEndpoint != "" { + shutdown, err := telemetry.InitOTelSDK(initCtx, true, otelExporterEndpoint) + if err != nil { + log.Errorf("Failed to initialize OTel SDK: %v", err) + return + } + defer func() { + err = errors.Join(err, shutdown(initCtx)) + }() + initCtx = telemetry.GetTraceParentContext() + } + + clients := cli.ClientSets{} - clients := clients.ClientSets{} + ctx, span := otel.Tracer(telemetry.TracerName).Start(initCtx, "ExecuteExperiment") + defer span.End() // parse the experiment name experimentName := flag.String("name", "pod-delete", "name of the chaos experiment") @@ -91,99 +114,101 @@ func main() { // invoke the corresponding experiment based on the (-name) flag switch *experimentName { case "container-kill": - containerKill.ContainerKill(clients) + containerKill.ContainerKill(ctx, clients) case "disk-fill": - diskFill.DiskFill(clients) + diskFill.DiskFill(ctx, clients) case "kafka-broker-pod-failure": - kafkaBrokerPodFailure.KafkaBrokerPodFailure(clients) + kafkaBrokerPodFailure.KafkaBrokerPodFailure(ctx, clients) case "kubelet-service-kill": - kubeletServiceKill.KubeletServiceKill(clients) + kubeletServiceKill.KubeletServiceKill(ctx, clients) case "docker-service-kill": - dockerServiceKill.DockerServiceKill(clients) + dockerServiceKill.DockerServiceKill(ctx, clients) case "node-cpu-hog": - nodeCPUHog.NodeCPUHog(clients) + nodeCPUHog.NodeCPUHog(ctx, clients) case "node-drain": - nodeDrain.NodeDrain(clients) + nodeDrain.NodeDrain(ctx, clients) case "node-io-stress": - nodeIOStress.NodeIOStress(clients) + nodeIOStress.NodeIOStress(ctx, clients) case "node-memory-hog": - nodeMemoryHog.NodeMemoryHog(clients) + nodeMemoryHog.NodeMemoryHog(ctx, clients) case "node-taint": - nodeTaint.NodeTaint(clients) + nodeTaint.NodeTaint(ctx, clients) case "pod-autoscaler": - podAutoscaler.PodAutoscaler(clients) + podAutoscaler.PodAutoscaler(ctx, clients) case "pod-cpu-hog-exec": - podCPUHogExec.PodCPUHogExec(clients) + podCPUHogExec.PodCPUHogExec(ctx, clients) case "pod-delete": - podDelete.PodDelete(clients) + podDelete.PodDelete(ctx, clients) case "pod-io-stress": - podIOStress.PodIOStress(clients) + podIOStress.PodIOStress(ctx, clients) case "pod-memory-hog-exec": - podMemoryHogExec.PodMemoryHogExec(clients) + podMemoryHogExec.PodMemoryHogExec(ctx, clients) case "pod-network-corruption": - podNetworkCorruption.PodNetworkCorruption(clients) + podNetworkCorruption.PodNetworkCorruption(ctx, clients) case "pod-network-duplication": - podNetworkDuplication.PodNetworkDuplication(clients) + podNetworkDuplication.PodNetworkDuplication(ctx, clients) case "pod-network-latency": - podNetworkLatency.PodNetworkLatency(clients) + podNetworkLatency.PodNetworkLatency(ctx, clients) case "pod-network-loss": - podNetworkLoss.PodNetworkLoss(clients) + podNetworkLoss.PodNetworkLoss(ctx, clients) case "pod-network-partition": - podNetworkPartition.PodNetworkPartition(clients) + podNetworkPartition.PodNetworkPartition(ctx, clients) case "pod-memory-hog": - podMemoryHog.PodMemoryHog(clients) + podMemoryHog.PodMemoryHog(ctx, clients) case "pod-cpu-hog": - podCPUHog.PodCPUHog(clients) + podCPUHog.PodCPUHog(ctx, clients) case "cassandra-pod-delete": - cassandraPodDelete.CasssandraPodDelete(clients) + cassandraPodDelete.CasssandraPodDelete(ctx, clients) case "aws-ssm-chaos-by-id": - awsSSMChaosByID.AWSSSMChaosByID(clients) + awsSSMChaosByID.AWSSSMChaosByID(ctx, clients) case "aws-ssm-chaos-by-tag": - 
awsSSMChaosByTag.AWSSSMChaosByTag(clients) + awsSSMChaosByTag.AWSSSMChaosByTag(ctx, clients) case "ec2-terminate-by-id": - ec2TerminateByID.EC2TerminateByID(clients) + ec2TerminateByID.EC2TerminateByID(ctx, clients) case "ec2-terminate-by-tag": - ec2TerminateByTag.EC2TerminateByTag(clients) + ec2TerminateByTag.EC2TerminateByTag(ctx, clients) case "ebs-loss-by-id": - ebsLossByID.EBSLossByID(clients) + ebsLossByID.EBSLossByID(ctx, clients) case "ebs-loss-by-tag": - ebsLossByTag.EBSLossByTag(clients) + ebsLossByTag.EBSLossByTag(ctx, clients) case "node-restart": - nodeRestart.NodeRestart(clients) + nodeRestart.NodeRestart(ctx, clients) case "pod-dns-error": - podDNSError.PodDNSError(clients) + podDNSError.PodDNSError(ctx, clients) case "pod-dns-spoof": - podDNSSpoof.PodDNSSpoof(clients) + podDNSSpoof.PodDNSSpoof(ctx, clients) case "pod-http-latency": - podHttpLatency.PodHttpLatency(clients) + podHttpLatency.PodHttpLatency(ctx, clients) case "pod-http-status-code": - podHttpStatusCode.PodHttpStatusCode(clients) + podHttpStatusCode.PodHttpStatusCode(ctx, clients) case "pod-http-modify-header": - podHttpModifyHeader.PodHttpModifyHeader(clients) + podHttpModifyHeader.PodHttpModifyHeader(ctx, clients) case "pod-http-modify-body": - podHttpModifyBody.PodHttpModifyBody(clients) + podHttpModifyBody.PodHttpModifyBody(ctx, clients) case "pod-http-reset-peer": - podHttpResetPeer.PodHttpResetPeer(clients) + podHttpResetPeer.PodHttpResetPeer(ctx, clients) case "vm-poweroff": - vmpoweroff.VMPoweroff(clients) + vmpoweroff.VMPoweroff(ctx, clients) case "azure-instance-stop": - azureInstanceStop.AzureInstanceStop(clients) + azureInstanceStop.AzureInstanceStop(ctx, clients) case "azure-disk-loss": - azureDiskLoss.AzureDiskLoss(clients) + azureDiskLoss.AzureDiskLoss(ctx, clients) case "gcp-vm-disk-loss": - gcpVMDiskLoss.VMDiskLoss(clients) + gcpVMDiskLoss.VMDiskLoss(ctx, clients) case "pod-fio-stress": - podFioStress.PodFioStress(clients) + podFioStress.PodFioStress(ctx, clients) case "gcp-vm-instance-stop": - gcpVMInstanceStop.VMInstanceStop(clients) + gcpVMInstanceStop.VMInstanceStop(ctx, clients) case "redfish-node-restart": - redfishNodeRestart.NodeRestart(clients) + redfishNodeRestart.NodeRestart(ctx, clients) case "gcp-vm-instance-stop-by-label": - gcpVMInstanceStopByLabel.GCPVMInstanceStopByLabel(clients) + gcpVMInstanceStopByLabel.GCPVMInstanceStopByLabel(ctx, clients) case "gcp-vm-disk-loss-by-label": - gcpVMDiskLossByLabel.GCPVMDiskLossByLabel(clients) + gcpVMDiskLossByLabel.GCPVMDiskLossByLabel(ctx, clients) case "spring-boot-cpu-stress", "spring-boot-memory-stress", "spring-boot-exceptions", "spring-boot-app-kill", "spring-boot-faults", "spring-boot-latency": - springBootFaults.Experiment(clients, *experimentName) + springBootFaults.Experiment(ctx, clients, *experimentName) + case "k6-loadgen": + k6Loadgen.Experiment(ctx, clients) default: log.Errorf("Unsupported -name %v, please provide the correct value of -name args", *experimentName) return diff --git a/bin/helper/helper.go b/bin/helper/helper.go index 3958123c5..c2774a131 100644 --- a/bin/helper/helper.go +++ b/bin/helper/helper.go @@ -1,7 +1,11 @@ package main import ( + "context" + "errors" "flag" + "os" + // Uncomment to load all auth plugins // _ "k8s.io/client-go/plugin/pkg/client/auth" @@ -17,10 +21,11 @@ import ( networkChaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/helper" dnsChaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/pod-dns-chaos/helper" stressChaos 
"github.com/litmuschaos/litmus-go/chaoslib/litmus/stress-chaos/helper" - - "github.com/litmuschaos/litmus-go/pkg/clients" + cli "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/log" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel" ) func init() { @@ -33,8 +38,24 @@ func init() { } func main() { + ctx := context.Background() + // Set up Observability. + if otelExporterEndpoint := os.Getenv(telemetry.OTELExporterOTLPEndpoint); otelExporterEndpoint != "" { + shutdown, err := telemetry.InitOTelSDK(ctx, true, otelExporterEndpoint) + if err != nil { + log.Errorf("Failed to initialize OTel SDK: %v", err) + return + } + defer func() { + err = errors.Join(err, shutdown(ctx)) + }() + ctx = telemetry.GetTraceParentContext() + } + + clients := cli.ClientSets{} - clients := clients.ClientSets{} + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "ExecuteExperimentHelper") + defer span.End() // parse the helper name helperName := flag.String("name", "", "name of the helper pod") @@ -50,17 +71,17 @@ func main() { // invoke the corresponding helper based on the the (-name) flag switch *helperName { case "container-kill": - containerKill.Helper(clients) + containerKill.Helper(ctx, clients) case "disk-fill": - diskFill.Helper(clients) + diskFill.Helper(ctx, clients) case "dns-chaos": - dnsChaos.Helper(clients) + dnsChaos.Helper(ctx, clients) case "stress-chaos": - stressChaos.Helper(clients) + stressChaos.Helper(ctx, clients) case "network-chaos": - networkChaos.Helper(clients) + networkChaos.Helper(ctx, clients) case "http-chaos": - httpChaos.Helper(clients) + httpChaos.Helper(ctx, clients) default: log.Errorf("Unsupported -name %v, please provide the correct value of -name args", *helperName) diff --git a/build/Dockerfile b/build/Dockerfile index 16aca31f9..2cc7fc772 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -1,6 +1,6 @@ # Multi-stage docker build # Build stage -FROM golang:1.18 AS builder +FROM golang:1.22 AS builder ARG TARGETOS=linux ARG TARGETARCH @@ -14,27 +14,99 @@ RUN export GOOS=${TARGETOS} && \ RUN CGO_ENABLED=0 go build -o /output/experiments ./bin/experiment RUN CGO_ENABLED=0 go build -o /output/helpers ./bin/helper -FROM alpine:3.15.0 AS dep +# Packaging stage +FROM registry.access.redhat.com/ubi9/ubi:9.4 + +LABEL maintainer="LitmusChaos" + +ARG TARGETARCH +ARG LITMUS_VERSION # Install generally useful things -RUN apk --update add \ - sudo \ - iproute2 \ - iptables +RUN yum install -y \ + sudo \ + sshpass \ + procps \ + openssh-clients +# tc binary +RUN yum install -y https://dl.rockylinux.org/vault/rocky/9.3/devel/$(uname -m)/os/Packages/i/iproute-6.2.0-5.el9.$(uname -m).rpm +RUN yum install -y https://dl.rockylinux.org/vault/rocky/9.3/devel/$(uname -m)/os/Packages/i/iproute-tc-6.2.0-5.el9.$(uname -m).rpm -# Packaging stage -# Image source: https://github.com/litmuschaos/test-tools/blob/master/custom/hardened-alpine/experiment/Dockerfile -# The base image is non-root (have litmus user) with default litmus directory. 
-FROM litmuschaos/experiment-alpine +# iptables +RUN yum install -y https://dl.rockylinux.org/vault/rocky/9.3/devel/$(uname -m)/os/Packages/i/iptables-libs-1.8.8-6.el9_1.$(uname -m).rpm +RUN yum install -y https://dl.fedoraproject.org/pub/archive/epel/9.3/Everything/$(uname -m)/Packages/i/iptables-legacy-libs-1.8.8-6.el9.2.$(uname -m).rpm +RUN yum install -y https://dl.fedoraproject.org/pub/archive/epel/9.3/Everything/$(uname -m)/Packages/i/iptables-legacy-1.8.8-6.el9.2.$(uname -m).rpm -LABEL maintainer="LitmusChaos" +# stress-ng +RUN yum install -y https://yum.oracle.com/repo/OracleLinux/OL9/appstream/$(uname -m)/getPackage/Judy-1.0.5-28.el9.$(uname -m).rpm +RUN yum install -y https://yum.oracle.com/repo/OracleLinux/OL9/appstream/$(uname -m)/getPackage/stress-ng-0.14.00-2.el9.$(uname -m).rpm + +#Installing Kubectl +ENV KUBE_LATEST_VERSION="v1.31.0" +RUN curl -L https://storage.googleapis.com/kubernetes-release/release/${KUBE_LATEST_VERSION}/bin/linux/${TARGETARCH}/kubectl -o /usr/bin/kubectl && \ + chmod 755 /usr/bin/kubectl + +#Installing crictl binaries +RUN curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.31.1/crictl-v1.31.1-linux-${TARGETARCH}.tar.gz --output crictl-v1.31.1-linux-${TARGETARCH}.tar.gz && \ + tar zxvf crictl-v1.31.1-linux-${TARGETARCH}.tar.gz -C /sbin && \ + chmod 755 /sbin/crictl + +#Installing promql cli binaries +RUN curl -L https://github.com/chaosnative/promql-cli/releases/download/3.0.0-beta6/promql_linux_${TARGETARCH} --output /usr/bin/promql && chmod 755 /usr/bin/promql + +#Installing pause cli binaries +RUN curl -L https://github.com/litmuschaos/test-tools/releases/download/${LITMUS_VERSION}/pause-linux-${TARGETARCH} --output /usr/bin/pause && chmod 755 /usr/bin/pause + +#Installing dns_interceptor cli binaries +RUN curl -L https://github.com/litmuschaos/test-tools/releases/download/${LITMUS_VERSION}/dns_interceptor --output /sbin/dns_interceptor && chmod 755 /sbin/dns_interceptor + +#Installing nsutil cli binaries +RUN curl -L https://github.com/litmuschaos/test-tools/releases/download/${LITMUS_VERSION}/nsutil-linux-${TARGETARCH} --output /sbin/nsutil && chmod 755 /sbin/nsutil + +#Installing nsutil shared lib +RUN curl -L https://github.com/litmuschaos/test-tools/releases/download/${LITMUS_VERSION}/nsutil_${TARGETARCH}.so --output /usr/local/lib/nsutil.so && chmod 755 /usr/local/lib/nsutil.so + +# Installing toxiproxy binaries +RUN curl -L https://litmus-http-proxy.s3.amazonaws.com/cli/cli/toxiproxy-cli-linux-${TARGETARCH}.tar.gz --output toxiproxy-cli-linux-${TARGETARCH}.tar.gz && \ + tar zxvf toxiproxy-cli-linux-${TARGETARCH}.tar.gz -C /sbin/ && \ + chmod 755 /sbin/toxiproxy-cli +RUN curl -L https://litmus-http-proxy.s3.amazonaws.com/server/server/toxiproxy-server-linux-${TARGETARCH}.tar.gz --output toxiproxy-server-linux-${TARGETARCH}.tar.gz && \ + tar zxvf toxiproxy-server-linux-${TARGETARCH}.tar.gz -C /sbin/ && \ + chmod 755 /sbin/toxiproxy-server + +ENV APP_USER=litmus +ENV APP_DIR="/$APP_USER" +ENV DATA_DIR="$APP_DIR/data" + +# The USERD_ID of user +ENV APP_USER_ID=2000 +RUN useradd -s /bin/true -u $APP_USER_ID -m -d $APP_DIR $APP_USER + +# change to 0(root) group because openshift will run container with arbitrary uid as a member of root group +RUN chgrp -R 0 "$APP_DIR" && chmod -R g=u "$APP_DIR" + +# Giving sudo to all users (required for almost all experiments) +RUN echo 'ALL ALL=(ALL:ALL) NOPASSWD: ALL' >> /etc/sudoers + +WORKDIR $APP_DIR + +COPY --from=builder /output/ . 
+ +COPY --from=docker:27.0.3 /usr/local/bin/docker /sbin/docker +RUN chmod 755 /sbin/docker + +# Set permissions and ownership for the copied binaries +RUN chmod 755 ./experiments ./helpers && \ + chown ${APP_USER}:0 ./experiments ./helpers + +# Set ownership for binaries in /sbin and /usr/bin +RUN chown ${APP_USER}:0 /sbin/* /usr/bin/* && \ + chown root:root /usr/bin/sudo && \ + chmod 4755 /usr/bin/sudo -COPY --from=builder /output/ /litmus -COPY --from=dep /usr/bin/sudo /usr/bin/sudo -COPY --from=dep /usr/lib/sudo /usr/lib/sudo -COPY --from=dep /sbin/tc /sbin/ -COPY --from=dep /sbin/iptables /sbin/ +# Copying Necessary Files +COPY ./pkg/cloud/aws/common/ssm-docs/LitmusChaos-AWS-SSM-Docs.yml ./LitmusChaos-AWS-SSM-Docs.yml +RUN chown ${APP_USER}:0 ./LitmusChaos-AWS-SSM-Docs.yml && chmod 755 ./LitmusChaos-AWS-SSM-Docs.yml -#Copying Necessary Files -COPY ./pkg/cloud/aws/common/ssm-docs/LitmusChaos-AWS-SSM-Docs.yml . +USER ${APP_USER} \ No newline at end of file diff --git a/build/kind-cluster/kind-config.yaml b/build/kind-cluster/kind-config.yaml index c61aead9f..752e993cd 100644 --- a/build/kind-cluster/kind-config.yaml +++ b/build/kind-cluster/kind-config.yaml @@ -1,7 +1,6 @@ -apiVersion: kind.x-k8s.io/v1alpha4 kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 nodes: - role: control-plane - role: worker - role: worker -- role: worker diff --git a/chaoslib/litmus/aws-ssm-chaos/lib/ssm-chaos.go b/chaoslib/litmus/aws-ssm-chaos/lib/ssm-chaos.go index 9205e7f53..0a54489fd 100644 --- a/chaoslib/litmus/aws-ssm-chaos/lib/ssm-chaos.go +++ b/chaoslib/litmus/aws-ssm-chaos/lib/ssm-chaos.go @@ -1,23 +1,28 @@ package lib import ( + "context" "os" "strings" "time" experimentTypes "github.com/litmuschaos/litmus-go/pkg/aws-ssm/aws-ssm-chaos/types" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ssm" "github.com/litmuschaos/litmus-go/pkg/events" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" ) // InjectChaosInSerialMode will inject the aws ssm chaos in serial mode that is one after other -func InjectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, inject chan os.Signal) error { +func InjectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, inject chan os.Signal) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectAWSSSMFaultInSerialMode") + defer span.End() select { case <-inject: @@ -60,7 +65,7 @@ func InjectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 && i == 0 { - if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err = probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return stacktrace.Propagate(err, "failed to run probes") } } @@ -85,7 +90,9 @@ 
func InjectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // InjectChaosInParallelMode will inject the aws ssm chaos in parallel mode that is all at once -func InjectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, inject chan os.Signal) error { +func InjectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, inject chan os.Signal) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectAWSSSMFaultInParallelMode") + defer span.End() select { case <-inject: @@ -125,7 +132,7 @@ func InjectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err = probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return stacktrace.Propagate(err, "failed to run probes") } } diff --git a/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-id.go b/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-id.go index 0eb99d158..e4bb5a50b 100644 --- a/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-id.go +++ b/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-id.go @@ -1,6 +1,7 @@ package ssm import ( + "context" "fmt" "os" "os/signal" @@ -10,12 +11,14 @@ import ( "github.com/litmuschaos/litmus-go/chaoslib/litmus/aws-ssm-chaos/lib" experimentTypes "github.com/litmuschaos/litmus-go/pkg/aws-ssm/aws-ssm-chaos/types" "github.com/litmuschaos/litmus-go/pkg/cerrors" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ssm" "github.com/litmuschaos/litmus-go/pkg/log" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" ) var ( @@ -24,7 +27,9 @@ var ( ) // PrepareAWSSSMChaosByID contains the prepration and injection steps for the experiment -func PrepareAWSSSMChaosByID(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PrepareAWSSSMChaosByID(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareAWSSSMFaultByID") + defer span.End() // inject channel is used to transmit signal notifications. 
inject = make(chan os.Signal, 1) @@ -60,11 +65,11 @@ func PrepareAWSSSMChaosByID(experimentsDetails *experimentTypes.ExperimentDetail switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = lib.InjectChaosInSerialMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails, inject); err != nil { + if err = lib.InjectChaosInSerialMode(ctx, experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails, inject); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = lib.InjectChaosInParallelMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails, inject); err != nil { + if err = lib.InjectChaosInParallelMode(ctx, experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails, inject); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: diff --git a/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-tag.go b/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-tag.go index 99884e697..c7e872c7b 100644 --- a/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-tag.go +++ b/chaoslib/litmus/aws-ssm-chaos/lib/ssm/aws-ssm-chaos-by-tag.go @@ -1,6 +1,7 @@ package ssm import ( + "context" "fmt" "os" "os/signal" @@ -10,16 +11,20 @@ import ( "github.com/litmuschaos/litmus-go/chaoslib/litmus/aws-ssm-chaos/lib" experimentTypes "github.com/litmuschaos/litmus-go/pkg/aws-ssm/aws-ssm-chaos/types" "github.com/litmuschaos/litmus-go/pkg/cerrors" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ssm" "github.com/litmuschaos/litmus-go/pkg/log" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" ) // PrepareAWSSSMChaosByTag contains the prepration and injection steps for the experiment -func PrepareAWSSSMChaosByTag(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PrepareAWSSSMChaosByTag(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectAWSSSMFaultByTag") + defer span.End() // inject channel is used to transmit signal notifications. 
inject = make(chan os.Signal, 1) @@ -55,11 +60,11 @@ func PrepareAWSSSMChaosByTag(experimentsDetails *experimentTypes.ExperimentDetai switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = lib.InjectChaosInSerialMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails, inject); err != nil { + if err = lib.InjectChaosInSerialMode(ctx, experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails, inject); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = lib.InjectChaosInParallelMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails, inject); err != nil { + if err = lib.InjectChaosInParallelMode(ctx, experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails, inject); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: diff --git a/chaoslib/litmus/azure-disk-loss/lib/azure-disk-loss.go b/chaoslib/litmus/azure-disk-loss/lib/azure-disk-loss.go index aa2c16ee8..210377809 100644 --- a/chaoslib/litmus/azure-disk-loss/lib/azure-disk-loss.go +++ b/chaoslib/litmus/azure-disk-loss/lib/azure-disk-loss.go @@ -1,6 +1,7 @@ package lib import ( + "context" "fmt" "os" "os/signal" @@ -11,16 +12,18 @@ import ( "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" experimentTypes "github.com/litmuschaos/litmus-go/pkg/azure/disk-loss/types" "github.com/litmuschaos/litmus-go/pkg/cerrors" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" diskStatus "github.com/litmuschaos/litmus-go/pkg/cloud/azure/disk" instanceStatus "github.com/litmuschaos/litmus-go/pkg/cloud/azure/instance" "github.com/litmuschaos/litmus-go/pkg/events" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/litmuschaos/litmus-go/pkg/utils/retry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" ) var ( @@ -29,7 +32,9 @@ var ( ) // PrepareChaos contains the prepration and injection steps for the experiment -func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PrepareChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareAzureDiskLossFault") + defer span.End() // inject channel is used to transmit signal notifications. 
inject = make(chan os.Signal, 1) @@ -79,11 +84,11 @@ func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = injectChaosInSerialMode(experimentsDetails, instanceNamesWithDiskNames, attachedDisksWithInstance, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInSerialMode(ctx, experimentsDetails, instanceNamesWithDiskNames, attachedDisksWithInstance, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = injectChaosInParallelMode(experimentsDetails, instanceNamesWithDiskNames, attachedDisksWithInstance, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInParallelMode(ctx, experimentsDetails, instanceNamesWithDiskNames, attachedDisksWithInstance, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -100,7 +105,9 @@ func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients } // injectChaosInParallelMode will inject the Azure disk loss chaos in parallel mode that is all at once -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesWithDiskNames map[string][]string, attachedDisksWithInstance map[string]*[]compute.DataDisk, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesWithDiskNames map[string][]string, attachedDisksWithInstance map[string]*[]compute.DataDisk, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectAzureDiskLossFaultInParallelMode") + defer span.End() //ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin ChaosStartTimeStamp := time.Now() @@ -139,7 +146,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet } // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return stacktrace.Propagate(err, "failed to run probes") } } @@ -178,7 +185,9 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet } // injectChaosInSerialMode will inject the Azure disk loss chaos in serial mode that is one after other -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesWithDiskNames map[string][]string, attachedDisksWithInstance map[string]*[]compute.DataDisk, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesWithDiskNames map[string][]string, attachedDisksWithInstance map[string]*[]compute.DataDisk, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, 
chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectAzureDiskLossFaultInSerialMode") + defer span.End() //ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin ChaosStartTimeStamp := time.Now() @@ -214,7 +223,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai // run the probes during chaos // the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration if len(resultDetails.ProbeDetails) != 0 && i == 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return stacktrace.Propagate(err, "failed to run probes") } } diff --git a/chaoslib/litmus/azure-instance-stop/lib/azure-instance-stop.go b/chaoslib/litmus/azure-instance-stop/lib/azure-instance-stop.go index 8b3950da7..eefd1c54a 100644 --- a/chaoslib/litmus/azure-instance-stop/lib/azure-instance-stop.go +++ b/chaoslib/litmus/azure-instance-stop/lib/azure-instance-stop.go @@ -1,6 +1,7 @@ package lib import ( + "context" "fmt" "os" "os/signal" @@ -10,15 +11,17 @@ import ( experimentTypes "github.com/litmuschaos/litmus-go/pkg/azure/instance-stop/types" "github.com/litmuschaos/litmus-go/pkg/cerrors" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" azureCommon "github.com/litmuschaos/litmus-go/pkg/cloud/azure/common" azureStatus "github.com/litmuschaos/litmus-go/pkg/cloud/azure/instance" "github.com/litmuschaos/litmus-go/pkg/events" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" ) var ( @@ -27,7 +30,9 @@ var ( ) // PrepareAzureStop will initialize instanceNameList and start chaos injection based on sequence method selected -func PrepareAzureStop(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PrepareAzureStop(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareAzureInstanceStopFault") + defer span.End() // inject channel is used to transmit signal notifications inject = make(chan os.Signal, 1) @@ -55,11 +60,11 @@ func PrepareAzureStop(experimentsDetails *experimentTypes.ExperimentDetails, cli switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = injectChaosInSerialMode(experimentsDetails, instanceNameList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInSerialMode(ctx, experimentsDetails, instanceNameList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = injectChaosInParallelMode(experimentsDetails, instanceNameList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInParallelMode(ctx, experimentsDetails, 
instanceNameList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -75,7 +80,10 @@ func PrepareAzureStop(experimentsDetails *experimentTypes.ExperimentDetails, cli } // injectChaosInSerialMode will inject the Azure instance termination in serial mode that is one after the other -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceNameList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, instanceNameList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectAzureInstanceStopFaultInSerialMode") + defer span.End() + select { case <-inject: // stopping the chaos execution, if abort signal received @@ -119,7 +127,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai // Run the probes during chaos // the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration if len(resultDetails.ProbeDetails) != 0 && i == 0 { - if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err = probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return stacktrace.Propagate(err, "failed to run probes") } } @@ -153,7 +161,10 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // injectChaosInParallelMode will inject the Azure instance termination in parallel mode that is all at once -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceNameList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, instanceNameList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectAzureInstanceStopFaultInParallelMode") + defer span.End() + select { case <-inject: // Stopping the chaos execution, if abort signal received @@ -198,7 +209,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet // Run probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err = probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return stacktrace.Propagate(err, "failed to run probes") } } diff --git a/chaoslib/litmus/container-kill/helper/container-kill.go b/chaoslib/litmus/container-kill/helper/container-kill.go index d92b15956..81e6b1a67 100644 --- a/chaoslib/litmus/container-kill/helper/container-kill.go +++ b/chaoslib/litmus/container-kill/helper/container-kill.go @@ -4,13 +4,16 @@ import ( "bytes" "context" "fmt" + "github.com/litmuschaos/litmus-go/pkg/telemetry" + "go.opentelemetry.io/otel" + "os/exec" + "strconv" + "time" 
+ "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/result" "github.com/palantir/stacktrace" "github.com/sirupsen/logrus" - "os/exec" - "strconv" - "time" "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" @@ -26,7 +29,9 @@ import ( var err error // Helper injects the container-kill chaos -func Helper(clients clients.ClientSets) { +func Helper(ctx context.Context, clients clients.ClientSets) { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "SimulateContainerKillFault") + defer span.End() experimentsDetails := experimentTypes.ExperimentDetails{} eventsDetails := types.EventDetails{} @@ -171,7 +176,7 @@ func validate(t targetDetails, timeout, delay int, clients clients.ClientSets) e return verifyRestartCount(t, timeout, delay, clients, t.RestartCountBefore) } -//stopContainerdContainer kill the application container +// stopContainerdContainer kill the application container func stopContainerdContainer(containerIDs []string, socketPath, signal, source string) error { if signal != "SIGKILL" && signal != "SIGTERM" { return cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: source, Reason: fmt.Sprintf("unsupported signal %s, use either SIGTERM or SIGKILL", signal)} @@ -192,7 +197,7 @@ func stopContainerdContainer(containerIDs []string, socketPath, signal, source s return nil } -//stopDockerContainer kill the application container +// stopDockerContainer kill the application container func stopDockerContainer(containerIDs []string, socketPath, signal, source string) error { var errOut, out bytes.Buffer cmd := exec.Command("sudo", "docker", "--host", fmt.Sprintf("unix://%s", socketPath), "kill", "--signal", signal) @@ -205,7 +210,7 @@ func stopDockerContainer(containerIDs []string, socketPath, signal, source strin return nil } -//getRestartCount return the restart count of target container +// getRestartCount return the restart count of target container func getRestartCount(target targetDetails, clients clients.ClientSets) (int, error) { pod, err := clients.KubeClient.CoreV1().Pods(target.Namespace).Get(context.Background(), target.Name, v1.GetOptions{}) if err != nil { @@ -221,7 +226,7 @@ func getRestartCount(target targetDetails, clients clients.ClientSets) (int, err return restartCount, nil } -//verifyRestartCount verify the restart count of target container that it is restarted or not after chaos injection +// verifyRestartCount verify the restart count of target container that it is restarted or not after chaos injection func verifyRestartCount(t targetDetails, timeout, delay int, clients clients.ClientSets, restartCountBefore int) error { restartCountAfter := 0 @@ -247,7 +252,7 @@ func verifyRestartCount(t targetDetails, timeout, delay int, clients clients.Cli }) } -//getENV fetches all the env variables from the runner pod +// getENV fetches all the env variables from the runner pod func getENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "") experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") diff --git a/chaoslib/litmus/container-kill/lib/container-kill.go b/chaoslib/litmus/container-kill/lib/container-kill.go index 79c92b776..95d81bf96 100644 --- a/chaoslib/litmus/container-kill/lib/container-kill.go +++ b/chaoslib/litmus/container-kill/lib/container-kill.go @@ -3,13 +3,16 @@ package lib import ( "context" "fmt" + "os" "strconv" "strings" "github.com/litmuschaos/litmus-go/pkg/cerrors" + 
"github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/container-kill/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" @@ -22,8 +25,10 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -//PrepareContainerKill contains the preparation steps before chaos injection -func PrepareContainerKill(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +// PrepareContainerKill contains the preparation steps before chaos injection +func PrepareContainerKill(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareContainerKillFault") + defer span.End() var err error // Get the target pod details for the chaos execution @@ -31,7 +36,7 @@ func PrepareContainerKill(experimentsDetails *experimentTypes.ExperimentDetails, if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil { return cerrors.Error{ErrorCode: cerrors.ErrorTypeTargetSelection, Reason: "provide one of the appLabel or TARGET_PODS"} } - //Setup the tunables if provided in range + //Set up the tunables if provided in range SetChaosTunables(experimentsDetails) log.InfoWithValues("[Info]: The tunables are:", logrus.Fields{ @@ -67,11 +72,11 @@ func PrepareContainerKill(experimentsDetails *experimentTypes.ExperimentDetails, experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != "" switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { + if err = injectChaosInSerialMode(ctx, experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { + if err = injectChaosInParallelMode(ctx, experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -87,10 +92,12 @@ func PrepareContainerKill(experimentsDetails *experimentTypes.ExperimentDetails, } // injectChaosInSerialMode kill the container of all target application serially (one by one) -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { +func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, 
"InjectContainerKillFaultInSerialMode") + defer span.End() // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -105,7 +112,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai runID := stringutils.GetRunID() - if err := createHelperPod(experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), pod.Spec.NodeName, runID); err != nil { + if err := createHelperPod(ctx, experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), pod.Spec.NodeName, runID); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } @@ -137,10 +144,12 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // injectChaosInParallelMode kill the container of all target application in parallel mode (all at once) -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { +func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectContainerKillFaultInParallelMode") + defer span.End() // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -154,7 +163,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet targetsPerNode = append(targetsPerNode, fmt.Sprintf("%s:%s:%s", k.Name, k.Namespace, k.TargetContainer)) } - if err := createHelperPod(experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID); err != nil { + if err := createHelperPod(ctx, experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } } @@ -187,7 +196,9 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet } // createHelperPod derive the attributes for helper pod and create the helper pod -func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, targets, nodeName, runID string) error { +func createHelperPod(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, targets, nodeName, runID string) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "CreateContainerKillFaultHelperPod") + defer span.End() privilegedEnable := false if experimentsDetails.ContainerRuntime == "crio" { @@ -231,7 +242,7 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, 
clie "./helpers -name container-kill", }, Resources: chaosDetails.Resources, - Env: getPodEnv(experimentsDetails, targets), + Env: getPodEnv(ctx, experimentsDetails, targets), VolumeMounts: []apiv1.VolumeMount{ { Name: "cri-socket", @@ -259,7 +270,7 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie } // getPodEnv derive all the env required for the helper pod -func getPodEnv(experimentsDetails *experimentTypes.ExperimentDetails, targets string) []apiv1.EnvVar { +func getPodEnv(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targets string) []apiv1.EnvVar { var envDetails common.ENVDetails envDetails.SetEnv("TARGETS", targets). @@ -275,13 +286,15 @@ func getPodEnv(experimentsDetails *experimentTypes.ExperimentDetails, targets st SetEnv("STATUS_CHECK_TIMEOUT", strconv.Itoa(experimentsDetails.Timeout)). SetEnv("EXPERIMENT_NAME", experimentsDetails.ExperimentName). SetEnv("INSTANCE_ID", experimentsDetails.InstanceID). + SetEnv("OTEL_EXPORTER_OTLP_ENDPOINT", os.Getenv(telemetry.OTELExporterOTLPEndpoint)). + SetEnv("TRACE_PARENT", telemetry.GetMarshalledSpanFromContext(ctx)). SetEnvFromDownwardAPI("v1", "metadata.name") return envDetails.ENV } -//SetChaosTunables will setup a random value within a given range of values -//If the value is not provided in range it'll setup the initial provided value. +// SetChaosTunables will setup a random value within a given range of values +// If the value is not provided in range it'll setup the initial provided value. func SetChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) { experimentsDetails.PodsAffectedPerc = common.ValidateRange(experimentsDetails.PodsAffectedPerc) experimentsDetails.Sequence = common.GetRandomSequence(experimentsDetails.Sequence) diff --git a/chaoslib/litmus/disk-fill/helper/disk-fill.go b/chaoslib/litmus/disk-fill/helper/disk-fill.go index 474f93f87..c851ba26f 100644 --- a/chaoslib/litmus/disk-fill/helper/disk-fill.go +++ b/chaoslib/litmus/disk-fill/helper/disk-fill.go @@ -4,7 +4,9 @@ import ( "context" "fmt" "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" "os" "os/exec" "os/signal" @@ -29,7 +31,9 @@ import ( var inject, abort chan os.Signal // Helper injects the disk-fill chaos -func Helper(clients clients.ClientSets) { +func Helper(ctx context.Context, clients clients.ClientSets) { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "SimulateDiskFillFault") + defer span.End() experimentsDetails := experimentTypes.ExperimentDetails{} eventsDetails := types.EventDetails{} @@ -185,8 +189,9 @@ func fillDisk(t targetDetails, bs int) error { out, err := cmd.CombinedOutput() if err != nil { log.Error(err.Error()) + return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: string(out)} } - return cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: string(out)} + return nil } // getEphemeralStorageAttributes derive the ephemeral storage attributes from the target pod diff --git a/chaoslib/litmus/disk-fill/lib/disk-fill.go b/chaoslib/litmus/disk-fill/lib/disk-fill.go index cf0192abd..0c63f84b2 100644 --- a/chaoslib/litmus/disk-fill/lib/disk-fill.go +++ 
b/chaoslib/litmus/disk-fill/lib/disk-fill.go @@ -3,13 +3,16 @@ package lib import ( "context" "fmt" + "os" "strconv" "strings" "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/disk-fill/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" @@ -23,8 +26,10 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -//PrepareDiskFill contains the preparation steps before chaos injection -func PrepareDiskFill(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +// PrepareDiskFill contains the preparation steps before chaos injection +func PrepareDiskFill(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareDiskFillFault") + defer span.End() var err error // It will contain all the pod & container details required for exec command @@ -72,11 +77,11 @@ func PrepareDiskFill(experimentsDetails *experimentTypes.ExperimentDetails, clie experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != "" switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, chaosDetails, execCommandDetails, resultDetails, eventsDetails); err != nil { + if err = injectChaosInSerialMode(ctx, experimentsDetails, targetPodList, clients, chaosDetails, execCommandDetails, resultDetails, eventsDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, chaosDetails, execCommandDetails, resultDetails, eventsDetails); err != nil { + if err = injectChaosInParallelMode(ctx, experimentsDetails, targetPodList, clients, chaosDetails, execCommandDetails, resultDetails, eventsDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -92,11 +97,12 @@ func PrepareDiskFill(experimentsDetails *experimentTypes.ExperimentDetails, clie } // injectChaosInSerialMode fill the ephemeral storage of all target application serially (one by one) -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, execCommandDetails exec.PodDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { - +func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, execCommandDetails exec.PodDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectDiskFillFaultInSerialMode") + defer span.End() // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, 
resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -110,7 +116,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } runID := stringutils.GetRunID() - if err := createHelperPod(experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), pod.Spec.NodeName, runID); err != nil { + if err := createHelperPod(ctx, experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), pod.Spec.NodeName, runID); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } @@ -144,12 +150,13 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // injectChaosInParallelMode fill the ephemeral storage of of all target application in parallel mode (all at once) -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, execCommandDetails exec.PodDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { - +func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, execCommandDetails exec.PodDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectDiskFillFaultInParallelMode") + defer span.End() var err error // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -163,7 +170,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet targetsPerNode = append(targetsPerNode, fmt.Sprintf("%s:%s:%s", k.Name, k.Namespace, k.TargetContainer)) } - if err := createHelperPod(experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID); err != nil { + if err := createHelperPod(ctx, experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } } @@ -196,7 +203,9 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet } // createHelperPod derive the attributes for helper pod and create the helper pod -func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, targets, appNodeName, runID string) error { +func createHelperPod(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, targets, appNodeName, runID string) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "CreateDiskFillFaultHelperPod") + defer span.End() privilegedEnable := true terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds) @@ -239,7 +248,7 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie "./helpers -name disk-fill", }, Resources: 
chaosDetails.Resources, - Env: getPodEnv(experimentsDetails, targets), + Env: getPodEnv(ctx, experimentsDetails, targets), VolumeMounts: []apiv1.VolumeMount{ { Name: "socket-path", @@ -267,7 +276,7 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie } // getPodEnv derive all the env required for the helper pod -func getPodEnv(experimentsDetails *experimentTypes.ExperimentDetails, targets string) []apiv1.EnvVar { +func getPodEnv(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targets string) []apiv1.EnvVar { var envDetails common.ENVDetails envDetails.SetEnv("TARGETS", targets). @@ -283,6 +292,8 @@ func getPodEnv(experimentsDetails *experimentTypes.ExperimentDetails, targets st SetEnv("INSTANCE_ID", experimentsDetails.InstanceID). SetEnv("SOCKET_PATH", experimentsDetails.SocketPath). SetEnv("CONTAINER_RUNTIME", experimentsDetails.ContainerRuntime). + SetEnv("OTEL_EXPORTER_OTLP_ENDPOINT", os.Getenv(telemetry.OTELExporterOTLPEndpoint)). + SetEnv("TRACE_PARENT", telemetry.GetMarshalledSpanFromContext(ctx)). SetEnvFromDownwardAPI("v1", "metadata.name") return envDetails.ENV diff --git a/chaoslib/litmus/docker-service-kill/lib/docker-service-kill.go b/chaoslib/litmus/docker-service-kill/lib/docker-service-kill.go index 6dd00f342..f1fef9c9e 100644 --- a/chaoslib/litmus/docker-service-kill/lib/docker-service-kill.go +++ b/chaoslib/litmus/docker-service-kill/lib/docker-service-kill.go @@ -6,9 +6,11 @@ import ( "strconv" "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/docker-service-kill/types" "github.com/litmuschaos/litmus-go/pkg/log" @@ -23,7 +25,9 @@ import ( ) // PrepareDockerServiceKill contains prepration steps before chaos injection -func PrepareDockerServiceKill(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PrepareDockerServiceKill(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareDockerServiceKillFault") + defer span.End() var err error if experimentsDetails.TargetNode == "" { @@ -59,7 +63,7 @@ func PrepareDockerServiceKill(experimentsDetails *experimentTypes.ExperimentDeta } // Creating the helper pod to perform docker-service-kill - if err = createHelperPod(experimentsDetails, clients, chaosDetails, experimentsDetails.TargetNode); err != nil { + if err = createHelperPod(ctx, experimentsDetails, clients, chaosDetails, experimentsDetails.TargetNode); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } @@ -74,7 +78,7 @@ func PrepareDockerServiceKill(experimentsDetails *experimentTypes.ExperimentDeta // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err = probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { 
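The `getPodEnv` hunks above hand the collector endpoint and a marshalled trace parent to the helper pod through the `OTEL_EXPORTER_OTLP_ENDPOINT` and `TRACE_PARENT` environment variables. A hypothetical helper-side sketch of how such a value could be turned back into a parent context using the standard W3C propagator; it assumes `TRACE_PARENT` carries a `traceparent` header value, whereas the actual decoding lives in `pkg/telemetry` and may differ:

```go
// Hypothetical sketch: rebuild a parent context from the TRACE_PARENT value
// passed to the helper pod, so helper spans join the experiment's trace.
package main

import (
	"context"
	"os"

	"go.opentelemetry.io/otel/propagation"
)

func contextFromTraceParent() context.Context {
	// assumes TRACE_PARENT holds a W3C traceparent header value
	carrier := propagation.MapCarrier{"traceparent": os.Getenv("TRACE_PARENT")}
	return propagation.TraceContext{}.Extract(context.Background(), carrier)
}

func main() {
	ctx := contextFromTraceParent()
	_ = ctx // a helper fault would start its spans from this ctx
}
```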
common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) return err } @@ -110,7 +114,9 @@ func PrepareDockerServiceKill(experimentsDetails *experimentTypes.ExperimentDeta } // createHelperPod derive the attributes for helper pod and create the helper pod -func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, appNodeName string) error { +func createHelperPod(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, appNodeName string) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "CreateDockerServiceKillFaultHelperPod") + defer span.End() privileged := true terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds) diff --git a/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-id/lib/ebs-loss-by-id.go b/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-id/lib/ebs-loss-by-id.go index b21a24de4..dbc504628 100644 --- a/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-id/lib/ebs-loss-by-id.go +++ b/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-id/lib/ebs-loss-by-id.go @@ -1,6 +1,7 @@ package lib import ( + "context" "fmt" "os" "os/signal" @@ -9,12 +10,14 @@ import ( ebsloss "github.com/litmuschaos/litmus-go/chaoslib/litmus/ebs-loss/lib" "github.com/litmuschaos/litmus-go/pkg/cerrors" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/kube-aws/ebs-loss/types" "github.com/litmuschaos/litmus-go/pkg/log" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" ) var ( @@ -23,7 +26,9 @@ var ( ) // PrepareEBSLossByID contains the prepration and injection steps for the experiment -func PrepareEBSLossByID(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PrepareEBSLossByID(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareAWSEBSLossFaultByID") + defer span.End() // inject channel is used to transmit signal notifications. 
inject = make(chan os.Signal, 1) @@ -57,11 +62,11 @@ func PrepareEBSLossByID(experimentsDetails *experimentTypes.ExperimentDetails, c switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = ebsloss.InjectChaosInSerialMode(experimentsDetails, volumeIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = ebsloss.InjectChaosInSerialMode(ctx, experimentsDetails, volumeIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = ebsloss.InjectChaosInParallelMode(experimentsDetails, volumeIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = ebsloss.InjectChaosInParallelMode(ctx, experimentsDetails, volumeIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: diff --git a/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-tag/lib/ebs-loss-by-tag.go b/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-tag/lib/ebs-loss-by-tag.go index 0b2039c3e..6e8589129 100644 --- a/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-tag/lib/ebs-loss-by-tag.go +++ b/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-tag/lib/ebs-loss-by-tag.go @@ -1,6 +1,7 @@ package lib import ( + "context" "fmt" "os" "os/signal" @@ -9,12 +10,14 @@ import ( ebsloss "github.com/litmuschaos/litmus-go/chaoslib/litmus/ebs-loss/lib" "github.com/litmuschaos/litmus-go/pkg/cerrors" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/kube-aws/ebs-loss/types" "github.com/litmuschaos/litmus-go/pkg/log" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" ) var ( @@ -23,7 +26,9 @@ var ( ) // PrepareEBSLossByTag contains the prepration and injection steps for the experiment -func PrepareEBSLossByTag(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PrepareEBSLossByTag(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareAWSEBSLossFaultByTag") + defer span.End() // inject channel is used to transmit signal notifications. 
inject = make(chan os.Signal, 1) @@ -55,11 +60,11 @@ func PrepareEBSLossByTag(experimentsDetails *experimentTypes.ExperimentDetails, switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = ebsloss.InjectChaosInSerialMode(experimentsDetails, targetEBSVolumeIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = ebsloss.InjectChaosInSerialMode(ctx, experimentsDetails, targetEBSVolumeIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = ebsloss.InjectChaosInParallelMode(experimentsDetails, targetEBSVolumeIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = ebsloss.InjectChaosInParallelMode(ctx, experimentsDetails, targetEBSVolumeIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: diff --git a/chaoslib/litmus/ebs-loss/lib/ebs-loss.go b/chaoslib/litmus/ebs-loss/lib/ebs-loss.go index 8fd39c06b..8fa9bb0e4 100644 --- a/chaoslib/litmus/ebs-loss/lib/ebs-loss.go +++ b/chaoslib/litmus/ebs-loss/lib/ebs-loss.go @@ -1,24 +1,29 @@ package lib import ( + "context" "fmt" "os" "time" "github.com/litmuschaos/litmus-go/pkg/cerrors" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" ebs "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ebs" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/kube-aws/ebs-loss/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" ) // InjectChaosInSerialMode will inject the ebs loss chaos in serial mode which means one after other -func InjectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetEBSVolumeIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func InjectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetEBSVolumeIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectAWSEBSLossFaultInSerialMode") + defer span.End() //ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin ChaosStartTimeStamp := time.Now() @@ -56,7 +61,7 @@ func InjectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai // run the probes during chaos // the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration if len(resultDetails.ProbeDetails) != 0 && i == 0 { - if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err = probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return stacktrace.Propagate(err, "failed to run probes") } } @@ -95,7 +100,9 @@ func InjectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // InjectChaosInParallelMode will inject the chaos in parallel mode 
that means all at once -func InjectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetEBSVolumeIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func InjectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetEBSVolumeIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectAWSEBSLossFaultInParallelMode") + defer span.End() var ec2InstanceIDList, deviceList []string @@ -152,7 +159,7 @@ func InjectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return stacktrace.Propagate(err, "failed to run probes") } } diff --git a/chaoslib/litmus/ec2-terminate-by-id/lib/ec2-terminate-by-id.go b/chaoslib/litmus/ec2-terminate-by-id/lib/ec2-terminate-by-id.go index 2472b9467..5a844099a 100644 --- a/chaoslib/litmus/ec2-terminate-by-id/lib/ec2-terminate-by-id.go +++ b/chaoslib/litmus/ec2-terminate-by-id/lib/ec2-terminate-by-id.go @@ -1,6 +1,7 @@ package lib import ( + "context" "fmt" "os" "os/signal" @@ -9,15 +10,17 @@ import ( "time" "github.com/litmuschaos/litmus-go/pkg/cerrors" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" awslib "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ec2" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/kube-aws/ec2-terminate-by-id/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" ) var ( @@ -26,7 +29,9 @@ var ( ) // PrepareEC2TerminateByID contains the prepration and injection steps for the experiment -func PrepareEC2TerminateByID(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PrepareEC2TerminateByID(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareAWSEC2TerminateFaultByID") + defer span.End() // inject channel is used to transmit signal notifications. 
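With the prepare functions now taking `ctx` first, the caller is expected to pass its traced context straight through. A hypothetical call-site fragment (the experiment entry point is not part of this diff, and the surrounding variables are placeholders):

```go
// Hypothetical call-site sketch: the experiment's traced ctx is handed to the
// prepare function so every child span lands in the same trace.
ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "AWSEC2TerminateByIDFault")
defer span.End()

// experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails are
// assumed to be populated by the experiment before chaos injection starts.
if err := lib.PrepareEC2TerminateByID(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
	log.Errorf("chaos injection failed, err: %v", err)
	return
}
```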
inject = make(chan os.Signal, 1) @@ -55,11 +60,11 @@ func PrepareEC2TerminateByID(experimentsDetails *experimentTypes.ExperimentDetai switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = injectChaosInSerialMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInSerialMode(ctx, experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = injectChaosInParallelMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInParallelMode(ctx, experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -75,7 +80,9 @@ func PrepareEC2TerminateByID(experimentsDetails *experimentTypes.ExperimentDetai } // injectChaosInSerialMode will inject the ec2 instance termination in serial mode that is one after other -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectAWSEC2TerminateFaultByIDInSerialMode") + defer span.End() select { case <-inject: @@ -116,7 +123,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai // run the probes during chaos // the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration if len(resultDetails.ProbeDetails) != 0 && i == 0 { - if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err = probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return stacktrace.Propagate(err, "failed to run probes") } } @@ -147,7 +154,9 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // injectChaosInParallelMode will inject the ec2 instance termination in parallel mode that is all at once -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectAWSEC2TerminateFaultByIDInParallelMode") + defer span.End() select { case <-inject: @@ -189,7 +198,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", 
eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return stacktrace.Propagate(err, "failed to run probes") } } diff --git a/chaoslib/litmus/ec2-terminate-by-tag/lib/ec2-terminate-by-tag.go b/chaoslib/litmus/ec2-terminate-by-tag/lib/ec2-terminate-by-tag.go index 25e1b3a00..2c34b83b4 100644 --- a/chaoslib/litmus/ec2-terminate-by-tag/lib/ec2-terminate-by-tag.go +++ b/chaoslib/litmus/ec2-terminate-by-tag/lib/ec2-terminate-by-tag.go @@ -1,6 +1,7 @@ package lib import ( + "context" "fmt" "os" "os/signal" @@ -9,22 +10,26 @@ import ( "time" "github.com/litmuschaos/litmus-go/pkg/cerrors" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" awslib "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ec2" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/kube-aws/ec2-terminate-by-tag/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/palantir/stacktrace" "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel" ) var inject, abort chan os.Signal // PrepareEC2TerminateByTag contains the prepration and injection steps for the experiment -func PrepareEC2TerminateByTag(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PrepareEC2TerminateByTag(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareAWSEC2TerminateFaultByTag") + defer span.End() // inject channel is used to transmit signal notifications. 
inject = make(chan os.Signal, 1) @@ -50,11 +55,11 @@ func PrepareEC2TerminateByTag(experimentsDetails *experimentTypes.ExperimentDeta switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err := injectChaosInSerialMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err := injectChaosInSerialMode(ctx, experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err := injectChaosInParallelMode(experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err := injectChaosInParallelMode(ctx, experimentsDetails, instanceIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -70,7 +75,9 @@ func PrepareEC2TerminateByTag(experimentsDetails *experimentTypes.ExperimentDeta } // injectChaosInSerialMode will inject the ce2 instance termination in serial mode that is one after other -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectAWSEC2TerminateFaultByTagInSerialMode") + defer span.End() select { case <-inject: @@ -111,7 +118,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai // run the probes during chaos // the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration if len(resultDetails.ProbeDetails) != 0 && i == 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return stacktrace.Propagate(err, "failed to run probes") } } @@ -142,7 +149,9 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // injectChaosInParallelMode will inject the ce2 instance termination in parallel mode that is all at once -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, instanceIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectAWSEC2TerminateFaultByTagInParallelMode") + defer span.End() select { case <-inject: @@ -182,7 +191,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", 
eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return stacktrace.Propagate(err, "failed to run probes") } } @@ -221,14 +230,14 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet // SetTargetInstance will select the target instance which are in running state and filtered from the given instance tag func SetTargetInstance(experimentsDetails *experimentTypes.ExperimentDetails) error { - instanceIDList, err := awslib.GetInstanceList(experimentsDetails.InstanceTag, experimentsDetails.Region) + instanceIDList, err := awslib.GetInstanceList(experimentsDetails.Ec2InstanceTag, experimentsDetails.Region) if err != nil { return stacktrace.Propagate(err, "failed to get the instance id list") } if len(instanceIDList) == 0 { return cerrors.Error{ ErrorCode: cerrors.ErrorTypeTargetSelection, - Reason: fmt.Sprintf("no instance found with the given tag %v, in region %v", experimentsDetails.InstanceTag, experimentsDetails.Region), + Reason: fmt.Sprintf("no instance found with the given tag %v, in region %v", experimentsDetails.Ec2InstanceTag, experimentsDetails.Region), } } @@ -246,7 +255,7 @@ func SetTargetInstance(experimentsDetails *experimentTypes.ExperimentDetails) er return cerrors.Error{ ErrorCode: cerrors.ErrorTypeChaosInject, Reason: "failed to get any running instance", - Target: fmt.Sprintf("EC2 Instance Tag: %v", experimentsDetails.InstanceTag)} + Target: fmt.Sprintf("EC2 Instance Tag: %v", experimentsDetails.Ec2InstanceTag)} } log.InfoWithValues("[Info]: Targeting the running instances filtered from instance tag", logrus.Fields{ diff --git a/chaoslib/litmus/gcp-vm-disk-loss-by-label/lib/gcp-vm-disk-loss-by-label.go b/chaoslib/litmus/gcp-vm-disk-loss-by-label/lib/gcp-vm-disk-loss-by-label.go index fb6dff6b7..42efdf8bd 100644 --- a/chaoslib/litmus/gcp-vm-disk-loss-by-label/lib/gcp-vm-disk-loss-by-label.go +++ b/chaoslib/litmus/gcp-vm-disk-loss-by-label/lib/gcp-vm-disk-loss-by-label.go @@ -1,6 +1,7 @@ package lib import ( + "context" "fmt" "os" "os/signal" @@ -9,15 +10,17 @@ import ( "time" "github.com/litmuschaos/litmus-go/pkg/cerrors" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/cloud/gcp" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-disk-loss/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" "google.golang.org/api/compute/v1" ) @@ -27,7 +30,9 @@ var ( ) // PrepareDiskVolumeLossByLabel contains the prepration and injection steps for the experiment -func PrepareDiskVolumeLossByLabel(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PrepareDiskVolumeLossByLabel(ctx context.Context, computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, 
"PrepareGCPDiskVolumeLossFaultByLabel") + defer span.End() // inject channel is used to transmit signal notifications. inject = make(chan os.Signal, 1) @@ -63,11 +68,11 @@ func PrepareDiskVolumeLossByLabel(computeService *compute.Service, experimentsDe switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = injectChaosInSerialMode(computeService, experimentsDetails, diskVolumeNamesList, experimentsDetails.TargetDiskInstanceNamesList, experimentsDetails.Zones, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInSerialMode(ctx, computeService, experimentsDetails, diskVolumeNamesList, experimentsDetails.TargetDiskInstanceNamesList, experimentsDetails.Zones, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = injectChaosInParallelMode(computeService, experimentsDetails, diskVolumeNamesList, experimentsDetails.TargetDiskInstanceNamesList, experimentsDetails.Zones, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInParallelMode(ctx, computeService, experimentsDetails, diskVolumeNamesList, experimentsDetails.TargetDiskInstanceNamesList, experimentsDetails.Zones, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -85,7 +90,9 @@ func PrepareDiskVolumeLossByLabel(computeService *compute.Service, experimentsDe } // injectChaosInSerialMode will inject the disk loss chaos in serial mode which means one after the other -func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, targetDiskVolumeNamesList, instanceNamesList []string, zone string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInSerialMode(ctx context.Context, computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, targetDiskVolumeNamesList, instanceNamesList []string, zone string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectGCPDiskVolumeLossFaultByLabelInSerialMode") + defer span.End() //ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin ChaosStartTimeStamp := time.Now() @@ -118,7 +125,7 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails // run the probes during chaos // the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration if len(resultDetails.ProbeDetails) != 0 && i == 0 { - if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err = probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -160,7 +167,9 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails } // injectChaosInParallelMode will inject the disk loss chaos in parallel mode that means all at once -func injectChaosInParallelMode(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, targetDiskVolumeNamesList, instanceNamesList []string, zone string, clients clients.ClientSets, resultDetails 
*types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInParallelMode(ctx context.Context, computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, targetDiskVolumeNamesList, instanceNamesList []string, zone string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectGCPDiskVolumeLossFaultByLabelInParallelMode") + defer span.End() //ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin ChaosStartTimeStamp := time.Now() @@ -196,7 +205,7 @@ func injectChaosInParallelMode(computeService *compute.Service, experimentsDetai // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } diff --git a/chaoslib/litmus/gcp-vm-disk-loss/lib/gcp-vm-disk-loss.go b/chaoslib/litmus/gcp-vm-disk-loss/lib/gcp-vm-disk-loss.go index 38f06901e..6a99010d9 100644 --- a/chaoslib/litmus/gcp-vm-disk-loss/lib/gcp-vm-disk-loss.go +++ b/chaoslib/litmus/gcp-vm-disk-loss/lib/gcp-vm-disk-loss.go @@ -1,6 +1,7 @@ package lib import ( + "context" "fmt" "os" "os/signal" @@ -10,15 +11,17 @@ import ( "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/clients" - gcp "github.com/litmuschaos/litmus-go/pkg/cloud/gcp" + "github.com/litmuschaos/litmus-go/pkg/cloud/gcp" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-disk-loss/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/palantir/stacktrace" "github.com/pkg/errors" + "go.opentelemetry.io/otel" "google.golang.org/api/compute/v1" ) @@ -28,7 +31,9 @@ var ( ) // PrepareDiskVolumeLoss contains the prepration and injection steps for the experiment -func PrepareDiskVolumeLoss(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PrepareDiskVolumeLoss(ctx context.Context, computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareVMDiskLossFault") + defer span.End() // inject channel is used to transmit signal notifications. 
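Across the serial-mode hunks, the `DuringChaos` probes start only in the first iteration and then keep running for the whole chaos duration; the only change here is that `probe.RunProbes` now receives the traced `ctx`. The recurring guard, reproduced in isolation for clarity (a fragment, assuming the loop index `i` and the usual litmus detail structs from the surrounding code):

```go
// OnChaos probes: started once, in the first iteration of the serial loop,
// and now traced via the propagated ctx.
if len(resultDetails.ProbeDetails) != 0 && i == 0 {
	if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil {
		return stacktrace.Propagate(err, "failed to run probes")
	}
}
```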
inject = make(chan os.Signal, 1) @@ -68,11 +73,11 @@ func PrepareDiskVolumeLoss(computeService *compute.Service, experimentsDetails * switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = injectChaosInSerialMode(computeService, experimentsDetails, diskNamesList, diskZonesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInSerialMode(ctx, computeService, experimentsDetails, diskNamesList, diskZonesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = injectChaosInParallelMode(computeService, experimentsDetails, diskNamesList, diskZonesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInParallelMode(ctx, computeService, experimentsDetails, diskNamesList, diskZonesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -90,8 +95,9 @@ func PrepareDiskVolumeLoss(computeService *compute.Service, experimentsDetails * } // injectChaosInSerialMode will inject the disk loss chaos in serial mode which means one after the other -func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, targetDiskVolumeNamesList, diskZonesList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - +func injectChaosInSerialMode(ctx context.Context, computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, targetDiskVolumeNamesList, diskZonesList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectVMDiskLossFaultInSerialMode") + defer span.End() //ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin ChaosStartTimeStamp := time.Now() duration := int(time.Since(ChaosStartTimeStamp).Seconds()) @@ -122,7 +128,7 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails // run the probes during chaos // the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration if len(resultDetails.ProbeDetails) != 0 && i == 0 { - if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err = probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -161,7 +167,9 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails } // injectChaosInParallelMode will inject the disk loss chaos in parallel mode that means all at once -func injectChaosInParallelMode(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, targetDiskVolumeNamesList, diskZonesList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInParallelMode(ctx context.Context, computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, targetDiskVolumeNamesList, diskZonesList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, 
chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectVMDiskLossFaultInParallelMode") + defer span.End() //ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin ChaosStartTimeStamp := time.Now() @@ -197,7 +205,7 @@ func injectChaosInParallelMode(computeService *compute.Service, experimentsDetai // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } diff --git a/chaoslib/litmus/gcp-vm-instance-stop-by-label/lib/gcp-vm-instance-stop-by-label.go b/chaoslib/litmus/gcp-vm-instance-stop-by-label/lib/gcp-vm-instance-stop-by-label.go index ca5eb4c0c..644a02137 100644 --- a/chaoslib/litmus/gcp-vm-instance-stop-by-label/lib/gcp-vm-instance-stop-by-label.go +++ b/chaoslib/litmus/gcp-vm-instance-stop-by-label/lib/gcp-vm-instance-stop-by-label.go @@ -1,6 +1,7 @@ package lib import ( + "context" "fmt" "os" "os/signal" @@ -9,22 +10,26 @@ import ( "time" "github.com/litmuschaos/litmus-go/pkg/cerrors" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" gcplib "github.com/litmuschaos/litmus-go/pkg/cloud/gcp" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-instance-stop/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" "google.golang.org/api/compute/v1" ) var inject, abort chan os.Signal // PrepareVMStopByLabel executes the experiment steps by injecting chaos into target VM instances -func PrepareVMStopByLabel(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PrepareVMStopByLabel(ctx context.Context, computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareGCPVMInstanceStopFaultByLabel") + defer span.End() // inject channel is used to transmit signal notifications. 
inject = make(chan os.Signal, 1) @@ -50,11 +55,11 @@ func PrepareVMStopByLabel(computeService *compute.Service, experimentsDetails *e switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err := injectChaosInSerialMode(computeService, experimentsDetails, instanceNamesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err := injectChaosInSerialMode(ctx, computeService, experimentsDetails, instanceNamesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err := injectChaosInParallelMode(computeService, experimentsDetails, instanceNamesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err := injectChaosInParallelMode(ctx, computeService, experimentsDetails, instanceNamesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -71,7 +76,9 @@ func PrepareVMStopByLabel(computeService *compute.Service, experimentsDetails *e } // injectChaosInSerialMode stops VM instances in serial mode i.e. one after the other -func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInSerialMode(ctx context.Context, computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectGCPVMInstanceStopFaultByLabelInSerialMode") + defer span.End() select { case <-inject: @@ -112,7 +119,7 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails // run the probes during chaos // the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration if len(resultDetails.ProbeDetails) != 0 && i == 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -156,8 +163,9 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails } // injectChaosInParallelMode will inject the VM instance termination in serial mode that is one after other -func injectChaosInParallelMode(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - +func injectChaosInParallelMode(ctx context.Context, computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectGCPVMInstanceStopFaultByLabelInParallelMode") + defer span.End() select { case <-inject: // stopping the chaos execution, if abort signal received @@ -200,7 +208,7 @@ func 
injectChaosInParallelMode(computeService *compute.Service, experimentsDetai // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } diff --git a/chaoslib/litmus/gcp-vm-instance-stop/lib/gcp-vm-instance-stop.go b/chaoslib/litmus/gcp-vm-instance-stop/lib/gcp-vm-instance-stop.go index 2cbcfdba4..281e1c211 100644 --- a/chaoslib/litmus/gcp-vm-instance-stop/lib/gcp-vm-instance-stop.go +++ b/chaoslib/litmus/gcp-vm-instance-stop/lib/gcp-vm-instance-stop.go @@ -1,6 +1,7 @@ package lib import ( + "context" "fmt" "os" "os/signal" @@ -9,15 +10,17 @@ import ( "time" "github.com/litmuschaos/litmus-go/pkg/cerrors" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" gcplib "github.com/litmuschaos/litmus-go/pkg/cloud/gcp" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-instance-stop/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" "google.golang.org/api/compute/v1" ) @@ -27,7 +30,9 @@ var ( ) // PrepareVMStop contains the prepration and injection steps for the experiment -func PrepareVMStop(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PrepareVMStop(ctx context.Context, computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareVMInstanceStopFault") + defer span.End() // inject channel is used to transmit signal notifications. inject = make(chan os.Signal, 1) @@ -55,11 +60,11 @@ func PrepareVMStop(computeService *compute.Service, experimentsDetails *experime switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = injectChaosInSerialMode(computeService, experimentsDetails, instanceNamesList, instanceZonesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInSerialMode(ctx, computeService, experimentsDetails, instanceNamesList, instanceZonesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = injectChaosInParallelMode(computeService, experimentsDetails, instanceNamesList, instanceZonesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInParallelMode(ctx, computeService, experimentsDetails, instanceNamesList, instanceZonesList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -76,7 +81,9 @@ func PrepareVMStop(computeService *compute.Service, experimentsDetails *experime } // injectChaosInSerialMode stops VM instances in serial mode i.e. 
one after the other -func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesList []string, instanceZonesList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInSerialMode(ctx context.Context, computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesList []string, instanceZonesList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectVMInstanceStopFaultInSerialMode") + defer span.End() select { case <-inject: @@ -117,7 +124,7 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails // run the probes during chaos // the OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration if len(resultDetails.ProbeDetails) != 0 && i == 0 { - if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err = probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -161,7 +168,9 @@ func injectChaosInSerialMode(computeService *compute.Service, experimentsDetails } // injectChaosInParallelMode stops VM instances in parallel mode i.e. all at once -func injectChaosInParallelMode(computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesList []string, instanceZonesList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInParallelMode(ctx context.Context, computeService *compute.Service, experimentsDetails *experimentTypes.ExperimentDetails, instanceNamesList []string, instanceZonesList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectVMInstanceStopFaultInParallelMode") + defer span.End() select { case <-inject: @@ -205,7 +214,7 @@ func injectChaosInParallelMode(computeService *compute.Service, experimentsDetai // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err = probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } diff --git a/chaoslib/litmus/http-chaos/helper/http-helper.go b/chaoslib/litmus/http-chaos/helper/http-helper.go index 43f339df3..b544df448 100644 --- a/chaoslib/litmus/http-chaos/helper/http-helper.go +++ b/chaoslib/litmus/http-chaos/helper/http-helper.go @@ -1,9 +1,12 @@ package helper import ( + "context" "fmt" "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" "os" "os/signal" "strconv" @@ -27,7 +30,9 @@ var ( ) // Helper injects the http chaos -func Helper(clients clients.ClientSets) { +func Helper(ctx context.Context, clients clients.ClientSets) { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "SimulatePodHTTPFault") + defer span.End() experimentsDetails := 
experimentTypes.ExperimentDetails{} eventsDetails := types.EventDetails{} @@ -225,7 +230,7 @@ const NoProxyToKill = "you need to specify whom to kill" // it is using nsenter command to enter into network namespace of target container // and execute the proxy related command inside it. func killProxy(pid int, source string) error { - stopProxyServerCommand := fmt.Sprintf("sudo nsenter -t %d -n sudo kill -9 $(ps aux | grep [t]oxiproxy | awk 'FNR==1{print $1}')", pid) + stopProxyServerCommand := fmt.Sprintf("sudo nsenter -t %d -n sudo kill -9 $(ps aux | grep [t]oxiproxy | awk 'FNR==2{print $2}')", pid) log.Infof("[Chaos]: Stopping proxy server") if err := common.RunBashCommand(stopProxyServerCommand, "failed to stop proxy server", source); err != nil { diff --git a/chaoslib/litmus/http-chaos/lib/header/header.go b/chaoslib/litmus/http-chaos/lib/header/header.go index 737efb2a5..1c822d505 100644 --- a/chaoslib/litmus/http-chaos/lib/header/header.go +++ b/chaoslib/litmus/http-chaos/lib/header/header.go @@ -1,16 +1,22 @@ package header import ( + "context" + http_chaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types" "github.com/litmuschaos/litmus-go/pkg/log" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel" ) -//PodHttpModifyHeaderChaos contains the steps to prepare and inject http modify header chaos -func PodHttpModifyHeaderChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +// PodHttpModifyHeaderChaos contains the steps to prepare and inject http modify header chaos +func PodHttpModifyHeaderChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PreparePodHTTPModifyHeaderFault") + defer span.End() log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{ "Target Port": experimentsDetails.TargetServicePort, @@ -27,5 +33,5 @@ func PodHttpModifyHeaderChaos(experimentsDetails *experimentTypes.ExperimentDeta stream = "upstream" } args := "-t header --" + stream + " -a headers='" + (experimentsDetails.HeadersMap) + "' -a mode=" + experimentsDetails.HeaderMode - return http_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) + return http_chaos.PrepareAndInjectChaos(ctx, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) } diff --git a/chaoslib/litmus/http-chaos/lib/http-chaos.go b/chaoslib/litmus/http-chaos/lib/http-chaos.go index 3d2b11600..59323f0b8 100644 --- a/chaoslib/litmus/http-chaos/lib/http-chaos.go +++ b/chaoslib/litmus/http-chaos/lib/http-chaos.go @@ -3,13 +3,16 @@ package lib import ( "context" "fmt" + "os" "strconv" "strings" "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes 
"github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" @@ -22,8 +25,8 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -//PrepareAndInjectChaos contains the preparation & injection steps -func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, args string) error { +// PrepareAndInjectChaos contains the preparation & injection steps +func PrepareAndInjectChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, args string) error { var err error // Get the target pod details for the chaos execution @@ -63,11 +66,11 @@ func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = injectChaosInSerialMode(experimentsDetails, targetPodList, args, clients, chaosDetails, resultDetails, eventsDetails); err != nil { + if err = injectChaosInSerialMode(ctx, experimentsDetails, targetPodList, args, clients, chaosDetails, resultDetails, eventsDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = injectChaosInParallelMode(experimentsDetails, targetPodList, args, clients, chaosDetails, resultDetails, eventsDetails); err != nil { + if err = injectChaosInParallelMode(ctx, experimentsDetails, targetPodList, args, clients, chaosDetails, resultDetails, eventsDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -78,11 +81,13 @@ func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails } // injectChaosInSerialMode inject the http chaos in all target application serially (one by one) -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, args string, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { +func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, args string, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectPodHTTPFaultInSerialMode") + defer span.End() // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -102,7 +107,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai }) runID := stringutils.GetRunID() - if err := createHelperPod(experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), pod.Spec.NodeName, runID, args); err != nil { + if err := createHelperPod(ctx, experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), 
pod.Spec.NodeName, runID, args); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } @@ -135,11 +140,13 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // injectChaosInParallelMode inject the http chaos in all target application in parallel mode (all at once) -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, args string, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { +func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, args string, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectPodHTTPFaultInParallelMode") + defer span.End() // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -153,7 +160,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet targetsPerNode = append(targetsPerNode, fmt.Sprintf("%s:%s:%s", k.Name, k.Namespace, k.TargetContainer)) } - if err := createHelperPod(experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID, args); err != nil { + if err := createHelperPod(ctx, experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID, args); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } } @@ -186,7 +193,9 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet } // createHelperPod derive the attributes for helper pod and create the helper pod -func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, targets, nodeName, runID, args string) error { +func createHelperPod(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, targets, nodeName, runID, args string) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "CreateHTTPChaosHelperPod") + defer span.End() privilegedEnable := true terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds) @@ -229,7 +238,7 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie "./helpers -name http-chaos", }, Resources: chaosDetails.Resources, - Env: getPodEnv(experimentsDetails, targets, args), + Env: getPodEnv(ctx, experimentsDetails, targets, args), VolumeMounts: []apiv1.VolumeMount{ { Name: "cri-socket", @@ -263,7 +272,7 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie } // getPodEnv derive all the env required for the helper pod -func getPodEnv(experimentsDetails *experimentTypes.ExperimentDetails, targets, args string) []apiv1.EnvVar { +func getPodEnv(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targets, args string) []apiv1.EnvVar { var envDetails common.ENVDetails envDetails.SetEnv("TARGETS", targets). 
@@ -279,13 +288,15 @@ func getPodEnv(experimentsDetails *experimentTypes.ExperimentDetails, targets, a SetEnv("TARGET_SERVICE_PORT", strconv.Itoa(experimentsDetails.TargetServicePort)). SetEnv("PROXY_PORT", strconv.Itoa(experimentsDetails.ProxyPort)). SetEnv("TOXICITY", strconv.Itoa(experimentsDetails.Toxicity)). + SetEnv("OTEL_EXPORTER_OTLP_ENDPOINT", os.Getenv(telemetry.OTELExporterOTLPEndpoint)). + SetEnv("TRACE_PARENT", telemetry.GetMarshalledSpanFromContext(ctx)). SetEnvFromDownwardAPI("v1", "metadata.name") return envDetails.ENV } -//SetChaosTunables will set up a random value within a given range of values -//If the value is not provided in range it'll set up the initial provided value. +// SetChaosTunables will set up a random value within a given range of values +// If the value is not provided in range it'll set up the initial provided value. func SetChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) { experimentsDetails.PodsAffectedPerc = common.ValidateRange(experimentsDetails.PodsAffectedPerc) experimentsDetails.Sequence = common.GetRandomSequence(experimentsDetails.Sequence) diff --git a/chaoslib/litmus/http-chaos/lib/latency/latency.go b/chaoslib/litmus/http-chaos/lib/latency/latency.go index df9c11278..7cd7f4816 100644 --- a/chaoslib/litmus/http-chaos/lib/latency/latency.go +++ b/chaoslib/litmus/http-chaos/lib/latency/latency.go @@ -1,18 +1,23 @@ package latency import ( + "context" "strconv" http_chaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types" "github.com/litmuschaos/litmus-go/pkg/log" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel" ) -//PodHttpLatencyChaos contains the steps to prepare and inject http latency chaos -func PodHttpLatencyChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +// PodHttpLatencyChaos contains the steps to prepare and inject http latency chaos +func PodHttpLatencyChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PreparePodHttpLatencyFault") + defer span.End() log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{ "Target Port": experimentsDetails.TargetServicePort, @@ -24,5 +29,5 @@ func PodHttpLatencyChaos(experimentsDetails *experimentTypes.ExperimentDetails, }) args := "-t latency -a latency=" + strconv.Itoa(experimentsDetails.Latency) - return http_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) + return http_chaos.PrepareAndInjectChaos(ctx, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) } diff --git a/chaoslib/litmus/http-chaos/lib/modify-body/modify-body.go b/chaoslib/litmus/http-chaos/lib/modify-body/modify-body.go index bce1d26f1..86c136def 100644 --- a/chaoslib/litmus/http-chaos/lib/modify-body/modify-body.go +++ b/chaoslib/litmus/http-chaos/lib/modify-body/modify-body.go @@ -1,20 +1,25 @@ package modifybody import ( + "context" 
"fmt" "math" "strings" http_chaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types" "github.com/litmuschaos/litmus-go/pkg/log" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel" ) // PodHttpModifyBodyChaos contains the steps to prepare and inject http modify body chaos -func PodHttpModifyBodyChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PodHttpModifyBodyChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PreparePodHTTPModifyBodyFault") + defer span.End() // responseBodyMaxLength defines the max length of response body string to be printed. It is taken as // the min of length of body and 120 characters to avoid printing large response body. @@ -34,7 +39,7 @@ func PodHttpModifyBodyChaos(experimentsDetails *experimentTypes.ExperimentDetail args := fmt.Sprintf( `-t modify_body -a body="%v" -a content_type=%v -a content_encoding=%v`, EscapeQuotes(experimentsDetails.ResponseBody), experimentsDetails.ContentType, experimentsDetails.ContentEncoding) - return http_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) + return http_chaos.PrepareAndInjectChaos(ctx, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) } // EscapeQuotes escapes the quotes in the given string diff --git a/chaoslib/litmus/http-chaos/lib/reset/reset.go b/chaoslib/litmus/http-chaos/lib/reset/reset.go index 20838ca3c..9bff4e09d 100644 --- a/chaoslib/litmus/http-chaos/lib/reset/reset.go +++ b/chaoslib/litmus/http-chaos/lib/reset/reset.go @@ -1,18 +1,23 @@ package reset import ( + "context" "strconv" http_chaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types" "github.com/litmuschaos/litmus-go/pkg/log" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel" ) -//PodHttpResetPeerChaos contains the steps to prepare and inject http reset peer chaos -func PodHttpResetPeerChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +// PodHttpResetPeerChaos contains the steps to prepare and inject http reset peer chaos +func PodHttpResetPeerChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PreparePodHTTPResetPeerFault") + defer span.End() log.InfoWithValues("[Info]: The chaos tunables are:", logrus.Fields{ 
"Target Port": experimentsDetails.TargetServicePort, @@ -24,5 +29,5 @@ func PodHttpResetPeerChaos(experimentsDetails *experimentTypes.ExperimentDetails }) args := "-t reset_peer -a timeout=" + strconv.Itoa(experimentsDetails.ResetTimeout) - return http_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) + return http_chaos.PrepareAndInjectChaos(ctx, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) } diff --git a/chaoslib/litmus/http-chaos/lib/statuscode/status-code.go b/chaoslib/litmus/http-chaos/lib/statuscode/status-code.go index be541f7f1..228e1072d 100644 --- a/chaoslib/litmus/http-chaos/lib/statuscode/status-code.go +++ b/chaoslib/litmus/http-chaos/lib/statuscode/status-code.go @@ -1,17 +1,21 @@ package statuscode import ( + "context" "fmt" - "github.com/litmuschaos/litmus-go/pkg/cerrors" "math" "math/rand" "strconv" "strings" "time" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" + "go.opentelemetry.io/otel" + http_chaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib" body "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib/modify-body" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/types" @@ -26,7 +30,9 @@ var acceptedStatusCodes = []string{ } // PodHttpStatusCodeChaos contains the steps to prepare and inject http status code chaos -func PodHttpStatusCodeChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PodHttpStatusCodeChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PreparePodHttpStatusCodeFault") + defer span.End() // responseBodyMaxLength defines the max length of response body string to be printed. It is taken as // the min of length of body and 120 characters to avoid printing large response body. 
@@ -49,7 +55,7 @@ func PodHttpStatusCodeChaos(experimentsDetails *experimentTypes.ExperimentDetail `-t status_code -a status_code=%s -a modify_response_body=%d -a response_body="%v" -a content_type=%s -a content_encoding=%s`, experimentsDetails.StatusCode, stringBoolToInt(experimentsDetails.ModifyResponseBody), body.EscapeQuotes(experimentsDetails.ResponseBody), experimentsDetails.ContentType, experimentsDetails.ContentEncoding) - return http_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) + return http_chaos.PrepareAndInjectChaos(ctx, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) } // GetStatusCode performs two functions: diff --git a/chaoslib/litmus/k6-loadgen/lib/k6-loadgen.go b/chaoslib/litmus/k6-loadgen/lib/k6-loadgen.go new file mode 100644 index 000000000..79ce56b30 --- /dev/null +++ b/chaoslib/litmus/k6-loadgen/lib/k6-loadgen.go @@ -0,0 +1,184 @@ +package lib + +import ( + "context" + "fmt" + "os" + "strconv" + + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/events" + experimentTypes "github.com/litmuschaos/litmus-go/pkg/load/k6-loadgen/types" + "github.com/litmuschaos/litmus-go/pkg/log" + "github.com/litmuschaos/litmus-go/pkg/probe" + "github.com/litmuschaos/litmus-go/pkg/status" + "github.com/litmuschaos/litmus-go/pkg/telemetry" + "github.com/litmuschaos/litmus-go/pkg/types" + "github.com/litmuschaos/litmus-go/pkg/utils/common" + "github.com/litmuschaos/litmus-go/pkg/utils/stringutils" + "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func experimentExecution(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectK6LoadGenFault") + defer span.End() + + if experimentsDetails.EngineName != "" { + msg := "Injecting " + experimentsDetails.ExperimentName + " chaos" + types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails) + events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine") + } + // run the probes during chaos + if len(resultDetails.ProbeDetails) != 0 { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + return err + } + } + + runID := stringutils.GetRunID() + + // creating the helper pod to perform k6-loadgen chaos + if err := createHelperPod(ctx, experimentsDetails, clients, chaosDetails, runID); err != nil { + return stacktrace.Propagate(err, "could not create helper pod") + } + + appLabel := fmt.Sprintf("app=%s-helper-%s", experimentsDetails.ExperimentName, runID) + + //checking the status of the helper pod, wait till the pod comes to running state else fail the experiment + log.Info("[Status]: Checking the status of the helper pod") + if err := status.CheckHelperStatus(experimentsDetails.ChaosNamespace, appLabel, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil { + common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) + return stacktrace.Propagate(err, "could not check helper status") + } + + // Wait till the completion of the helper pod + // set an upper limit for the waiting time + log.Info("[Wait]: 
Waiting till the completion of the helper pod") + podStatus, err := status.WaitForCompletion(experimentsDetails.ChaosNamespace, appLabel, clients, experimentsDetails.ChaosDuration+experimentsDetails.Timeout, common.GetContainerNames(chaosDetails)...) + if err != nil || podStatus == "Failed" { + common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) + return common.HelperFailedError(err, appLabel, experimentsDetails.ChaosNamespace, true) + } + + //Deleting all the helper pod for k6-loadgen chaos + log.Info("[Cleanup]: Deleting all the helper pods") + if err = common.DeleteAllPod(appLabel, experimentsDetails.ChaosNamespace, chaosDetails.Timeout, chaosDetails.Delay, clients); err != nil { + return stacktrace.Propagate(err, "could not delete helper pod(s)") + } + + return nil +} + +// PrepareChaos contains the preparation steps before chaos injection +func PrepareChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareK6LoadGenFault") + defer span.End() + + // Waiting for the ramp time before chaos injection + if experimentsDetails.RampTime != 0 { + log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime) + common.WaitForDuration(experimentsDetails.RampTime) + } + + // Starting the k6-loadgen experiment + if err := experimentExecution(ctx, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + return stacktrace.Propagate(err, "could not execute chaos") + } + + // Waiting for the ramp time after chaos injection + if experimentsDetails.RampTime != 0 { + log.Infof("[Ramp]: Waiting for the %vs ramp time after injecting chaos", experimentsDetails.RampTime) + common.WaitForDuration(experimentsDetails.RampTime) + } + return nil +} + +// createHelperPod derive the attributes for helper pod and create the helper pod +func createHelperPod(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, runID string) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "CreateK6LoadGenFaultHelperPod") + defer span.End() + + const volumeName = "script-volume" + const mountPath = "/mnt" + + var envs []corev1.EnvVar + args := []string{ + mountPath + "/" + experimentsDetails.ScriptSecretKey, + "-q", + "--duration", + strconv.Itoa(experimentsDetails.ChaosDuration) + "s", + "--tag", + "trace_id=" + span.SpanContext().TraceID().String(), + } + + if otelExporterEndpoint := os.Getenv(telemetry.OTELExporterOTLPEndpoint); otelExporterEndpoint != "" { + envs = []corev1.EnvVar{ + { + Name: "K6_OTEL_METRIC_PREFIX", + Value: experimentsDetails.OTELMetricPrefix, + }, + { + Name: "K6_OTEL_GRPC_EXPORTER_INSECURE", + Value: "true", + }, + { + Name: "K6_OTEL_GRPC_EXPORTER_ENDPOINT", + Value: otelExporterEndpoint, + }, + } + args = append(args, "--out", "experimental-opentelemetry") + } + + helperPod := &corev1.Pod{ + ObjectMeta: v1.ObjectMeta{ + GenerateName: experimentsDetails.ExperimentName + "-helper-", + Namespace: experimentsDetails.ChaosNamespace, + Labels: common.GetHelperLabels(chaosDetails.Labels, runID, experimentsDetails.ExperimentName), + Annotations: chaosDetails.Annotations, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + ImagePullSecrets: 
chaosDetails.ImagePullSecrets, + Containers: []corev1.Container{ + { + Name: experimentsDetails.ExperimentName, + Image: experimentsDetails.LIBImage, + ImagePullPolicy: corev1.PullPolicy(experimentsDetails.LIBImagePullPolicy), + Command: []string{ + "k6", + "run", + }, + Args: args, + Env: envs, + Resources: chaosDetails.Resources, + VolumeMounts: []corev1.VolumeMount{ + { + Name: volumeName, + MountPath: mountPath, + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: volumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: experimentsDetails.ScriptSecretName, + }, + }, + }, + }, + }, + } + + _, err := clients.KubeClient.CoreV1().Pods(experimentsDetails.ChaosNamespace).Create(context.Background(), helperPod, v1.CreateOptions{}) + if err != nil { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: fmt.Sprintf("unable to create helper pod: %s", err.Error())} + } + return nil +} diff --git a/chaoslib/litmus/kafka-broker-pod-failure/lib/pod-delete.go b/chaoslib/litmus/kafka-broker-pod-failure/lib/pod-delete.go index 9a78b1896..d828f614d 100644 --- a/chaoslib/litmus/kafka-broker-pod-failure/lib/pod-delete.go +++ b/chaoslib/litmus/kafka-broker-pod-failure/lib/pod-delete.go @@ -8,10 +8,12 @@ import ( "time" "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/workloads" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/kafka/types" "github.com/litmuschaos/litmus-go/pkg/log" @@ -24,7 +26,9 @@ import ( ) // PreparePodDelete contains the prepration steps before chaos injection -func PreparePodDelete(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PreparePodDelete(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareKafkaPodDeleteFault") + defer span.End() //Waiting for the ramp time before chaos injection if experimentsDetails.ChaoslibDetail.RampTime != 0 { @@ -34,11 +38,11 @@ func PreparePodDelete(experimentsDetails *experimentTypes.ExperimentDetails, cli switch strings.ToLower(experimentsDetails.ChaoslibDetail.Sequence) { case "serial": - if err := injectChaosInSerialMode(experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil { + if err := injectChaosInSerialMode(ctx, experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err := injectChaosInParallelMode(experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil { + if err := injectChaosInParallelMode(ctx, experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -54,11 +58,12 @@ func PreparePodDelete(experimentsDetails *experimentTypes.ExperimentDetails, cli } // injectChaosInSerialMode delete the kafka broker 
pods in serial mode(one by one) -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, eventsDetails *types.EventDetails, resultDetails *types.ResultDetails) error { - +func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, eventsDetails *types.EventDetails, resultDetails *types.ResultDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectKafkaPodDeleteFaultInSerialMode") + defer span.End() // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -149,11 +154,12 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // injectChaosInParallelMode delete the kafka broker pods in parallel mode (all at once) -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, eventsDetails *types.EventDetails, resultDetails *types.ResultDetails) error { - +func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, eventsDetails *types.EventDetails, resultDetails *types.ResultDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectKafkaPodDeleteFaultInParallelMode") + defer span.End() // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } diff --git a/chaoslib/litmus/kubelet-service-kill/lib/kubelet-service-kill.go b/chaoslib/litmus/kubelet-service-kill/lib/kubelet-service-kill.go index 8de74c5ce..350a8b390 100644 --- a/chaoslib/litmus/kubelet-service-kill/lib/kubelet-service-kill.go +++ b/chaoslib/litmus/kubelet-service-kill/lib/kubelet-service-kill.go @@ -6,9 +6,11 @@ import ( "strconv" "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/kubelet-service-kill/types" "github.com/litmuschaos/litmus-go/pkg/log" @@ -23,7 +25,9 @@ import ( ) // PrepareKubeletKill contains prepration steps before chaos injection -func PrepareKubeletKill(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PrepareKubeletKill(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareKubeletServiceKillFault") + defer span.End() var err error if 
experimentsDetails.TargetNode == "" { @@ -59,7 +63,7 @@ func PrepareKubeletKill(experimentsDetails *experimentTypes.ExperimentDetails, c } // Creating the helper pod to perform node memory hog - if err = createHelperPod(experimentsDetails, clients, chaosDetails, experimentsDetails.TargetNode); err != nil { + if err = createHelperPod(ctx, experimentsDetails, clients, chaosDetails, experimentsDetails.TargetNode); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } @@ -76,7 +80,7 @@ func PrepareKubeletKill(experimentsDetails *experimentTypes.ExperimentDetails, c // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err = probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) return err } @@ -112,7 +116,9 @@ func PrepareKubeletKill(experimentsDetails *experimentTypes.ExperimentDetails, c } // createHelperPod derive the attributes for helper pod and create the helper pod -func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, appNodeName string) error { +func createHelperPod(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, appNodeName string) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "CreateKubeletServiceKillFaultHelperPod") + defer span.End() privileged := true terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds) diff --git a/chaoslib/litmus/network-chaos/helper/netem.go b/chaoslib/litmus/network-chaos/helper/netem.go index c0193f518..b5d200c24 100644 --- a/chaoslib/litmus/network-chaos/helper/netem.go +++ b/chaoslib/litmus/network-chaos/helper/netem.go @@ -1,10 +1,13 @@ package helper import ( + "context" "fmt" "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/events" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" "os" "os/exec" "os/signal" @@ -34,7 +37,9 @@ var ( ) // Helper injects the network chaos -func Helper(clients clients.ClientSets) { +func Helper(ctx context.Context, clients clients.ClientSets) { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "SimulatePodNetworkFault") + defer span.End() experimentsDetails := experimentTypes.ExperimentDetails{} eventsDetails := types.EventDetails{} @@ -76,7 +81,7 @@ func Helper(clients clients.ClientSets) { } -//preparePodNetworkChaos contains the prepration steps before chaos injection +// preparePodNetworkChaos contains the prepration steps before chaos injection func preparePodNetworkChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails) error { targetEnv := os.Getenv("TARGETS") @@ -130,7 +135,7 @@ func preparePodNetworkChaos(experimentsDetails *experimentTypes.ExperimentDetail } log.Infof("successfully injected chaos on target: {name: %s, namespace: %v, container: %v}", t.Name, t.Namespace, t.TargetContainer) if err = result.AnnotateChaosResult(resultDetails.Name, chaosDetails.ChaosNamespace, "injected", "pod", t.Name); err != nil { - if _, revertErr := killnetem(t, 
experimentsDetails.NetworkInterface); err != nil { + if _, revertErr := killnetem(t, experimentsDetails.NetworkInterface); revertErr != nil { return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(revertErr).Error())} } return stacktrace.Propagate(err, "could not annotate chaosresult") @@ -295,7 +300,7 @@ type targetDetails struct { Source string } -//getENV fetches all the env variables from the runner pod +// getENV fetches all the env variables from the runner pod func getENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "") experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") diff --git a/chaoslib/litmus/network-chaos/lib/corruption/corruption.go b/chaoslib/litmus/network-chaos/lib/corruption/corruption.go index 78974f0f9..4d78450da 100644 --- a/chaoslib/litmus/network-chaos/lib/corruption/corruption.go +++ b/chaoslib/litmus/network-chaos/lib/corruption/corruption.go @@ -1,15 +1,21 @@ package corruption import ( + "context" + network_chaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" + "go.opentelemetry.io/otel" ) -//PodNetworkCorruptionChaos contains the steps to prepare and inject chaos -func PodNetworkCorruptionChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +// PodNetworkCorruptionChaos contains the steps to prepare and inject chaos +func PodNetworkCorruptionChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PreparePodNetworkCorruptionFault") + defer span.End() args := "corrupt " + experimentsDetails.NetworkPacketCorruptionPercentage - return network_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) + return network_chaos.PrepareAndInjectChaos(ctx, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) } diff --git a/chaoslib/litmus/network-chaos/lib/duplication/duplication.go b/chaoslib/litmus/network-chaos/lib/duplication/duplication.go index a5705f0a2..9ceae9fe6 100644 --- a/chaoslib/litmus/network-chaos/lib/duplication/duplication.go +++ b/chaoslib/litmus/network-chaos/lib/duplication/duplication.go @@ -1,15 +1,21 @@ package duplication import ( + "context" + network_chaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" + "go.opentelemetry.io/otel" ) -//PodNetworkDuplicationChaos contains the steps to prepare and inject chaos -func PodNetworkDuplicationChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails 
*types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +// PodNetworkDuplicationChaos contains the steps to prepare and inject chaos +func PodNetworkDuplicationChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PreparePodNetworkDuplicationFault") + defer span.End() args := "duplicate " + experimentsDetails.NetworkPacketDuplicationPercentage - return network_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) + return network_chaos.PrepareAndInjectChaos(ctx, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) } diff --git a/chaoslib/litmus/network-chaos/lib/latency/latency.go b/chaoslib/litmus/network-chaos/lib/latency/latency.go index c5482f1d9..e3f849fbb 100644 --- a/chaoslib/litmus/network-chaos/lib/latency/latency.go +++ b/chaoslib/litmus/network-chaos/lib/latency/latency.go @@ -1,17 +1,22 @@ package latency import ( + "context" "strconv" network_chaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" + "go.opentelemetry.io/otel" ) -//PodNetworkLatencyChaos contains the steps to prepare and inject chaos -func PodNetworkLatencyChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +// PodNetworkLatencyChaos contains the steps to prepare and inject chaos +func PodNetworkLatencyChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PreparePodNetworkLatencyFault") + defer span.End() args := "delay " + strconv.Itoa(experimentsDetails.NetworkLatency) + "ms " + strconv.Itoa(experimentsDetails.Jitter) + "ms" - return network_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) + return network_chaos.PrepareAndInjectChaos(ctx, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) } diff --git a/chaoslib/litmus/network-chaos/lib/loss/loss.go b/chaoslib/litmus/network-chaos/lib/loss/loss.go index 419f37da9..336c57a66 100644 --- a/chaoslib/litmus/network-chaos/lib/loss/loss.go +++ b/chaoslib/litmus/network-chaos/lib/loss/loss.go @@ -1,15 +1,21 @@ package loss import ( + "context" + network_chaos "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" + "go.opentelemetry.io/otel" ) -//PodNetworkLossChaos contains the steps to prepare and inject chaos -func PodNetworkLossChaos(experimentsDetails 
*experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +// PodNetworkLossChaos contains the steps to prepare and inject chaos +func PodNetworkLossChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PreparePodNetworkLossFault") + defer span.End() args := "loss " + experimentsDetails.NetworkPacketLossPercentage - return network_chaos.PrepareAndInjectChaos(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) + return network_chaos.PrepareAndInjectChaos(ctx, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails, args) } diff --git a/chaoslib/litmus/network-chaos/lib/network-chaos.go b/chaoslib/litmus/network-chaos/lib/network-chaos.go index e956e346c..dc73645f8 100644 --- a/chaoslib/litmus/network-chaos/lib/network-chaos.go +++ b/chaoslib/litmus/network-chaos/lib/network-chaos.go @@ -4,14 +4,17 @@ import ( "context" "fmt" "net" + "os" "strconv" "strings" "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" k8serrors "k8s.io/apimachinery/pkg/api/errors" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" @@ -28,8 +31,8 @@ var serviceMesh = []string{"istio", "envoy"} var destIpsSvcMesh string var destIps string -//PrepareAndInjectChaos contains the preparation & injection steps -func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, args string) error { +// PrepareAndInjectChaos contains the preparation & injection steps +func PrepareAndInjectChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, args string) error { var err error // Get the target pod details for the chaos execution @@ -69,11 +72,11 @@ func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != "" switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, chaosDetails, args, resultDetails, eventsDetails); err != nil { + if err = injectChaosInSerialMode(ctx, experimentsDetails, targetPodList, clients, chaosDetails, args, resultDetails, eventsDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, chaosDetails, args, resultDetails, eventsDetails); err != nil { + if err = injectChaosInParallelMode(ctx, experimentsDetails, targetPodList, clients, chaosDetails, args, resultDetails, eventsDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel 
mode") } default: @@ -84,10 +87,13 @@ func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails } // injectChaosInSerialMode inject the network chaos in all target application serially (one by one) -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, args string, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { +func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, args string, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectPodNetworkFaultInSerialMode") + defer span.End() + // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -107,7 +113,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai runID := stringutils.GetRunID() - if err := createHelperPod(experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer, serviceMesh), pod.Spec.NodeName, runID, args); err != nil { + if err := createHelperPod(ctx, experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer, serviceMesh), pod.Spec.NodeName, runID, args); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } @@ -140,12 +146,14 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // injectChaosInParallelMode inject the network chaos in all target application in parallel mode (all at once) -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, args string, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { +func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, args string, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectPodNetworkFaultInParallelMode") + defer span.End() var err error // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -163,7 +171,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet targetsPerNode = append(targetsPerNode, fmt.Sprintf("%s:%s:%s:%s", k.Name, k.Namespace, k.TargetContainer, k.ServiceMesh)) } - if err := createHelperPod(experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID, args); err != nil { + if err := createHelperPod(ctx, experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, 
runID, args); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } } @@ -196,7 +204,9 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet } // createHelperPod derive the attributes for helper pod and create the helper pod -func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, targets string, nodeName, runID, args string) error { +func createHelperPod(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, targets string, nodeName, runID, args string) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "CreatePodNetworkFaultHelperPod") + defer span.End() privilegedEnable := true terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds) @@ -239,7 +249,7 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie "./helpers -name network-chaos", }, Resources: chaosDetails.Resources, - Env: getPodEnv(experimentsDetails, targets, args), + Env: getPodEnv(ctx, experimentsDetails, targets, args), VolumeMounts: []apiv1.VolumeMount{ { Name: "cri-socket", @@ -273,7 +283,7 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie } // getPodEnv derive all the env required for the helper pod -func getPodEnv(experimentsDetails *experimentTypes.ExperimentDetails, targets string, args string) []apiv1.EnvVar { +func getPodEnv(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targets string, args string) []apiv1.EnvVar { var envDetails common.ENVDetails envDetails.SetEnv("TARGETS", targets). @@ -291,6 +301,8 @@ func getPodEnv(experimentsDetails *experimentTypes.ExperimentDetails, targets st SetEnv("DESTINATION_IPS_SERVICE_MESH", destIpsSvcMesh). SetEnv("SOURCE_PORTS", experimentsDetails.SourcePorts). SetEnv("DESTINATION_PORTS", experimentsDetails.DestinationPorts). + SetEnv("OTEL_EXPORTER_OTLP_ENDPOINT", os.Getenv(telemetry.OTELExporterOTLPEndpoint)). + SetEnv("TRACE_PARENT", telemetry.GetMarshalledSpanFromContext(ctx)). SetEnvFromDownwardAPI("v1", "metadata.name") return envDetails.ENV @@ -358,6 +370,9 @@ func getPodIPFromService(host string, clients clients.ClientSets) ([]string, err return ips, cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Target: fmt.Sprintf("{svcName: %s,podLabel: %s, namespace: %s}", svcNs, svcSelector, svcNs), Reason: fmt.Sprintf("failed to derive pods from service: %s", err.Error())} } for _, p := range pods.Items { + if p.Status.PodIP == "" { + continue + } ips = append(ips, p.Status.PodIP) } @@ -411,8 +426,8 @@ func getIpsForTargetHosts(targetHosts string, clients clients.ClientSets, servic return strings.Join(commaSeparatedIPs, ","), nil } -//SetChaosTunables will set up a random value within a given range of values -//If the value is not provided in range it'll set up the initial provided value. +// SetChaosTunables will set up a random value within a given range of values +// If the value is not provided in range it'll set up the initial provided value. 
func SetChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) { experimentsDetails.NetworkPacketLossPercentage = common.ValidateRange(experimentsDetails.NetworkPacketLossPercentage) experimentsDetails.NetworkPacketCorruptionPercentage = common.ValidateRange(experimentsDetails.NetworkPacketCorruptionPercentage) diff --git a/chaoslib/litmus/node-cpu-hog/lib/node-cpu-hog.go b/chaoslib/litmus/node-cpu-hog/lib/node-cpu-hog.go index 6e9e1f0b8..57bd9adb0 100644 --- a/chaoslib/litmus/node-cpu-hog/lib/node-cpu-hog.go +++ b/chaoslib/litmus/node-cpu-hog/lib/node-cpu-hog.go @@ -7,9 +7,11 @@ import ( "strings" "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-cpu-hog/types" "github.com/litmuschaos/litmus-go/pkg/log" @@ -24,7 +26,9 @@ import ( ) // PrepareNodeCPUHog contains preparation steps before chaos injection -func PrepareNodeCPUHog(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PrepareNodeCPUHog(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareNodeCPUHogFault") + defer span.End() //set up the tunables if provided in range setChaosTunables(experimentsDetails) @@ -62,11 +66,11 @@ func PrepareNodeCPUHog(experimentsDetails *experimentTypes.ExperimentDetails, cl switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = injectChaosInSerialMode(experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInSerialMode(ctx, experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = injectChaosInParallelMode(experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInParallelMode(ctx, experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -82,13 +86,15 @@ func PrepareNodeCPUHog(experimentsDetails *experimentTypes.ExperimentDetails, cl } // injectChaosInSerialMode stress the cpu of all the target nodes serially (one by one) -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetNodeList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetNodeList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectNodeCPUHogFaultInSerialMode") + defer span.End() 
nodeCPUCores := experimentsDetails.NodeCPUcores // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -116,7 +122,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai experimentsDetails.RunID = stringutils.GetRunID() // Creating the helper pod to perform node cpu hog - if err := createHelperPod(experimentsDetails, chaosDetails, appNode, clients); err != nil { + if err := createHelperPod(ctx, experimentsDetails, chaosDetails, appNode, clients); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } @@ -149,12 +155,15 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // injectChaosInParallelMode stress the cpu of all the target nodes in parallel mode (all at once) -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetNodeList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetNodeList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectNodeCPUHogFaultInParallelMode") + defer span.End() + nodeCPUCores := experimentsDetails.NodeCPUcores // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -182,7 +191,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet }) // Creating the helper pod to perform node cpu hog - if err := createHelperPod(experimentsDetails, chaosDetails, appNode, clients); err != nil { + if err := createHelperPod(ctx, experimentsDetails, chaosDetails, appNode, clients); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } } @@ -217,7 +226,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet return nil } -//setCPUCapacity fetch the node cpu capacity +// setCPUCapacity fetch the node cpu capacity func setCPUCapacity(experimentsDetails *experimentTypes.ExperimentDetails, appNode string, clients clients.ClientSets) error { node, err := clients.KubeClient.CoreV1().Nodes().Get(context.Background(), appNode, v1.GetOptions{}) if err != nil { @@ -228,7 +237,9 @@ func setCPUCapacity(experimentsDetails *experimentTypes.ExperimentDetails, appNo } // createHelperPod derive the attributes for helper pod and create the helper pod -func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, chaosDetails *types.ChaosDetails, appNode string, clients clients.ClientSets) error { +func createHelperPod(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, chaosDetails *types.ChaosDetails, appNode string, clients clients.ClientSets) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "CreateNodeCPUHogFaultHelperPod") + defer span.End() 
terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds) @@ -278,8 +289,8 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, chao return nil } -//setChaosTunables will set up a random value within a given range of values -//If the value is not provided in range it'll set up the initial provided value. +// setChaosTunables will set up a random value within a given range of values +// If the value is not provided in range it'll set up the initial provided value. func setChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) { experimentsDetails.NodeCPUcores = common.ValidateRange(experimentsDetails.NodeCPUcores) experimentsDetails.CPULoad = common.ValidateRange(experimentsDetails.CPULoad) diff --git a/chaoslib/litmus/node-drain/lib/node-drain.go b/chaoslib/litmus/node-drain/lib/node-drain.go index 860c675ee..b46dd34bf 100644 --- a/chaoslib/litmus/node-drain/lib/node-drain.go +++ b/chaoslib/litmus/node-drain/lib/node-drain.go @@ -3,8 +3,6 @@ package lib import ( "context" "fmt" - "github.com/litmuschaos/litmus-go/pkg/cerrors" - "github.com/palantir/stacktrace" "os" "os/exec" "os/signal" @@ -13,7 +11,12 @@ import ( "syscall" "time" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" + "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" + + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-drain/types" "github.com/litmuschaos/litmus-go/pkg/log" @@ -31,8 +34,10 @@ var ( inject, abort chan os.Signal ) -//PrepareNodeDrain contains the preparation steps before chaos injection -func PrepareNodeDrain(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +// PrepareNodeDrain contains the preparation steps before chaos injection +func PrepareNodeDrain(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareNodeDrainFault") + defer span.End() // inject channel is used to transmit signal notifications. 
inject = make(chan os.Signal, 1) @@ -66,7 +71,7 @@ func PrepareNodeDrain(experimentsDetails *experimentTypes.ExperimentDetails, cli // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err = probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -75,7 +80,7 @@ func PrepareNodeDrain(experimentsDetails *experimentTypes.ExperimentDetails, cli go abortWatcher(experimentsDetails, clients, resultDetails, chaosDetails, eventsDetails) // Drain the application node - if err := drainNode(experimentsDetails, clients, chaosDetails); err != nil { + if err := drainNode(ctx, experimentsDetails, clients, chaosDetails); err != nil { log.Info("[Revert]: Reverting chaos because error during draining of node") if uncordonErr := uncordonNode(experimentsDetails, clients, chaosDetails); uncordonErr != nil { return cerrors.PreserveError{ErrString: fmt.Sprintf("[%s,%s]", stacktrace.RootCause(err).Error(), stacktrace.RootCause(uncordonErr).Error())} @@ -125,7 +130,9 @@ func PrepareNodeDrain(experimentsDetails *experimentTypes.ExperimentDetails, cli } // drainNode drain the target node -func drainNode(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error { +func drainNode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectNodeDrainFault") + defer span.End() select { case <-inject: diff --git a/chaoslib/litmus/node-io-stress/lib/node-io-stress.go b/chaoslib/litmus/node-io-stress/lib/node-io-stress.go index abcc1530d..94b269a81 100644 --- a/chaoslib/litmus/node-io-stress/lib/node-io-stress.go +++ b/chaoslib/litmus/node-io-stress/lib/node-io-stress.go @@ -7,9 +7,11 @@ import ( "strings" "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-io-stress/types" "github.com/litmuschaos/litmus-go/pkg/log" @@ -24,8 +26,9 @@ import ( ) // PrepareNodeIOStress contains preparation steps before chaos injection -func PrepareNodeIOStress(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - +func PrepareNodeIOStress(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareNodeIOStressFault") + defer span.End() //set up the tunables if provided in range setChaosTunables(experimentsDetails) @@ -63,11 +66,11 @@ func PrepareNodeIOStress(experimentsDetails *experimentTypes.ExperimentDetails, switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = injectChaosInSerialMode(experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = 
injectChaosInSerialMode(ctx, experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = injectChaosInParallelMode(experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInParallelMode(ctx, experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -83,11 +86,13 @@ func PrepareNodeIOStress(experimentsDetails *experimentTypes.ExperimentDetails, } // injectChaosInSerialMode stress the io of all the target nodes serially (one by one) -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetNodeList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetNodeList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectNodeIOStressFaultInSerialMode") + defer span.End() // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -109,7 +114,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai experimentsDetails.RunID = stringutils.GetRunID() // Creating the helper pod to perform node io stress - if err := createHelperPod(experimentsDetails, chaosDetails, appNode, clients); err != nil { + if err := createHelperPod(ctx, experimentsDetails, chaosDetails, appNode, clients); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } @@ -141,11 +146,13 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // injectChaosInParallelMode stress the io of all the target nodes in parallel mode (all at once) -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetNodeList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetNodeList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectNodeIOStressFaultInParallelMode") + defer span.End() // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -167,7 +174,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet }) // Creating the helper pod to perform node io stress - if err := 
createHelperPod(experimentsDetails, chaosDetails, appNode, clients); err != nil { + if err := createHelperPod(ctx, experimentsDetails, chaosDetails, appNode, clients); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } } @@ -205,8 +212,9 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet } // createHelperPod derive the attributes for helper pod and create the helper pod -func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, chaosDetails *types.ChaosDetails, appNode string, clients clients.ClientSets) error { - +func createHelperPod(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, chaosDetails *types.ChaosDetails, appNode string, clients clients.ClientSets) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "CreateNodeIOStressFaultHelperPod") + defer span.End() terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds) helperPod := &apiv1.Pod{ @@ -287,8 +295,8 @@ func getContainerArguments(experimentsDetails *experimentTypes.ExperimentDetails return stressArgs } -//setChaosTunables will set up a random value within a given range of values -//If the value is not provided in range it'll set up the initial provided value. +// setChaosTunables will set up a random value within a given range of values +// If the value is not provided in range it'll set up the initial provided value. func setChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) { experimentsDetails.FilesystemUtilizationBytes = common.ValidateRange(experimentsDetails.FilesystemUtilizationBytes) experimentsDetails.FilesystemUtilizationPercentage = common.ValidateRange(experimentsDetails.FilesystemUtilizationPercentage) diff --git a/chaoslib/litmus/node-memory-hog/lib/node-memory-hog.go b/chaoslib/litmus/node-memory-hog/lib/node-memory-hog.go index 9ab8bc251..6562d817f 100644 --- a/chaoslib/litmus/node-memory-hog/lib/node-memory-hog.go +++ b/chaoslib/litmus/node-memory-hog/lib/node-memory-hog.go @@ -7,9 +7,11 @@ import ( "strings" "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-memory-hog/types" "github.com/litmuschaos/litmus-go/pkg/log" @@ -25,7 +27,9 @@ import ( ) // PrepareNodeMemoryHog contains preparation steps before chaos injection -func PrepareNodeMemoryHog(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PrepareNodeMemoryHog(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareNodeMemoryHogFault") + defer span.End() //set up the tunables if provided in range setChaosTunables(experimentsDetails) @@ -64,11 +68,11 @@ func PrepareNodeMemoryHog(experimentsDetails *experimentTypes.ExperimentDetails, switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = injectChaosInSerialMode(experimentsDetails, targetNodeList, clients, 
resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInSerialMode(ctx, experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = injectChaosInParallelMode(experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInParallelMode(ctx, experimentsDetails, targetNodeList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -84,11 +88,13 @@ func PrepareNodeMemoryHog(experimentsDetails *experimentTypes.ExperimentDetails, } // injectChaosInSerialMode stress the memory of all the target nodes serially (one by one) -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetNodeList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetNodeList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectNodeMemoryHogFaultInSerialMode") + defer span.End() // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -122,7 +128,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // Creating the helper pod to perform node memory hog - if err = createHelperPod(experimentsDetails, chaosDetails, appNode, clients, MemoryConsumption); err != nil { + if err = createHelperPod(ctx, experimentsDetails, chaosDetails, appNode, clients, MemoryConsumption); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } @@ -158,11 +164,13 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // injectChaosInParallelMode stress the memory all the target nodes in parallel mode (all at once) -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetNodeList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetNodeList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectNodeMemoryHogFaultInParallelMode") + defer span.End() // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -196,7 +204,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet } // Creating the helper 
pod to perform node memory hog - if err = createHelperPod(experimentsDetails, chaosDetails, appNode, clients, MemoryConsumption); err != nil { + if err = createHelperPod(ctx, experimentsDetails, chaosDetails, appNode, clients, MemoryConsumption); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } } @@ -312,7 +320,9 @@ func calculateMemoryConsumption(experimentsDetails *experimentTypes.ExperimentDe } // createHelperPod derive the attributes for helper pod and create the helper pod -func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, chaosDetails *types.ChaosDetails, appNode string, clients clients.ClientSets, MemoryConsumption string) error { +func createHelperPod(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, chaosDetails *types.ChaosDetails, appNode string, clients clients.ClientSets, MemoryConsumption string) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "CreateNodeMemoryHogFaultHelperPod") + defer span.End() terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds) @@ -362,8 +372,8 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, chao return nil } -//setChaosTunables will set up a random value within a given range of values -//If the value is not provided in range it'll set up the initial provided value. +// setChaosTunables will set up a random value within a given range of values +// If the value is not provided in range it'll set up the initial provided value. func setChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) { experimentsDetails.MemoryConsumptionMebibytes = common.ValidateRange(experimentsDetails.MemoryConsumptionMebibytes) experimentsDetails.MemoryConsumptionPercentage = common.ValidateRange(experimentsDetails.MemoryConsumptionPercentage) diff --git a/chaoslib/litmus/node-restart/lib/node-restart.go b/chaoslib/litmus/node-restart/lib/node-restart.go index 98ffdd157..d4750912e 100644 --- a/chaoslib/litmus/node-restart/lib/node-restart.go +++ b/chaoslib/litmus/node-restart/lib/node-restart.go @@ -7,9 +7,11 @@ import ( "strings" "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-restart/types" "github.com/litmuschaos/litmus-go/pkg/log" @@ -38,7 +40,9 @@ const ( ) // PrepareNodeRestart contains preparation steps before chaos injection -func PrepareNodeRestart(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PrepareNodeRestart(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareNodeRestartFault") + defer span.End() //Select the node if experimentsDetails.TargetNode == "" { @@ -81,7 +85,7 @@ func PrepareNodeRestart(experimentsDetails *experimentTypes.ExperimentDetails, c } // Creating the helper pod to perform node restart - if err = createHelperPod(experimentsDetails, chaosDetails, clients); err != 
nil { + if err = createHelperPod(ctx, experimentsDetails, chaosDetails, clients); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } @@ -98,7 +102,7 @@ func PrepareNodeRestart(experimentsDetails *experimentTypes.ExperimentDetails, c // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err = probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { common.DeleteAllHelperPodBasedOnJobCleanupPolicy(appLabel, chaosDetails, clients) return err } @@ -127,10 +131,12 @@ func PrepareNodeRestart(experimentsDetails *experimentTypes.ExperimentDetails, c } // createHelperPod derive the attributes for helper pod and create the helper pod -func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, chaosDetails *types.ChaosDetails, clients clients.ClientSets) error { +func createHelperPod(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, chaosDetails *types.ChaosDetails, clients clients.ClientSets) error { // This method is attaching emptyDir along with secret volume, and copy data from secret // to the emptyDir, because secret is mounted as readonly and with 777 perms and it can't be changed // because of: https://github.com/kubernetes/kubernetes/issues/57923 + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "CreateNodeRestartFaultHelperPod") + defer span.End() terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds) diff --git a/chaoslib/litmus/node-taint/lib/node-taint.go b/chaoslib/litmus/node-taint/lib/node-taint.go index 1d48078e8..8efd0a289 100644 --- a/chaoslib/litmus/node-taint/lib/node-taint.go +++ b/chaoslib/litmus/node-taint/lib/node-taint.go @@ -3,15 +3,18 @@ package lib import ( "context" "fmt" - "github.com/litmuschaos/litmus-go/pkg/cerrors" - "github.com/palantir/stacktrace" "os" "os/signal" "strings" "syscall" "time" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" + "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" + + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-taint/types" "github.com/litmuschaos/litmus-go/pkg/log" @@ -28,8 +31,10 @@ var ( inject, abort chan os.Signal ) -//PrepareNodeTaint contains the preparation steps before chaos injection -func PrepareNodeTaint(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +// PrepareNodeTaint contains the preparation steps before chaos injection +func PrepareNodeTaint(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareNodeTaintFault") + defer span.End() // inject channel is used to transmit signal notifications. 
inject = make(chan os.Signal, 1) @@ -63,7 +68,7 @@ func PrepareNodeTaint(experimentsDetails *experimentTypes.ExperimentDetails, cli // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err = probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -72,7 +77,7 @@ func PrepareNodeTaint(experimentsDetails *experimentTypes.ExperimentDetails, cli go abortWatcher(experimentsDetails, clients, resultDetails, chaosDetails, eventsDetails) // taint the application node - if err := taintNode(experimentsDetails, clients, chaosDetails); err != nil { + if err := taintNode(ctx, experimentsDetails, clients, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not taint node") } @@ -117,7 +122,9 @@ func PrepareNodeTaint(experimentsDetails *experimentTypes.ExperimentDetails, cli } // taintNode taint the application node -func taintNode(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error { +func taintNode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectNodeTaintFault") + defer span.End() // get the taint labels & effect taintKey, taintValue, taintEffect := getTaintDetails(experimentsDetails) diff --git a/chaoslib/litmus/pod-autoscaler/lib/pod-autoscaler.go b/chaoslib/litmus/pod-autoscaler/lib/pod-autoscaler.go index 813d514ec..feefad7f6 100644 --- a/chaoslib/litmus/pod-autoscaler/lib/pod-autoscaler.go +++ b/chaoslib/litmus/pod-autoscaler/lib/pod-autoscaler.go @@ -3,15 +3,18 @@ package lib import ( "context" "fmt" - "github.com/litmuschaos/litmus-go/pkg/cerrors" - "github.com/palantir/stacktrace" "os" "os/signal" "strings" "syscall" "time" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" + "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" + + "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-autoscaler/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/math" @@ -31,8 +34,10 @@ var ( appsv1StatefulsetClient appsv1.StatefulSetInterface ) -//PreparePodAutoscaler contains the preparation steps and chaos injection steps -func PreparePodAutoscaler(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +// PreparePodAutoscaler contains the preparation steps and chaos injection steps +func PreparePodAutoscaler(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PreparePodAutoscalerFault") + defer span.End() //Waiting for the ramp time before chaos injection if experimentsDetails.RampTime != 0 { @@ -64,7 +69,7 @@ func PreparePodAutoscaler(experimentsDetails *experimentTypes.ExperimentDetails, //calling go routine which will continuously watch for the abort signal go 
abortPodAutoScalerChaos(appsUnderTest, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails) - if err = podAutoscalerChaosInDeployment(experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = podAutoscalerChaosInDeployment(ctx, experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not scale deployment") } @@ -91,7 +96,7 @@ func PreparePodAutoscaler(experimentsDetails *experimentTypes.ExperimentDetails, //calling go routine which will continuously watch for the abort signal go abortPodAutoScalerChaos(appsUnderTest, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails) - if err = podAutoscalerChaosInStatefulset(experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = podAutoscalerChaosInStatefulset(ctx, experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not scale statefulset") } @@ -117,7 +122,7 @@ func getSliceOfTotalApplicationsTargeted(appList []experimentTypes.ApplicationUn return appList[:newAppListLength] } -//getDeploymentDetails is used to get the name and total number of replicas of the deployment +// getDeploymentDetails is used to get the name and total number of replicas of the deployment func getDeploymentDetails(experimentsDetails *experimentTypes.ExperimentDetails) ([]experimentTypes.ApplicationUnderTest, error) { deploymentList, err := appsv1DeploymentClient.List(context.Background(), metav1.ListOptions{LabelSelector: experimentsDetails.AppLabel}) @@ -135,7 +140,7 @@ func getDeploymentDetails(experimentsDetails *experimentTypes.ExperimentDetails) return getSliceOfTotalApplicationsTargeted(appsUnderTest, experimentsDetails), nil } -//getStatefulsetDetails is used to get the name and total number of replicas of the statefulsets +// getStatefulsetDetails is used to get the name and total number of replicas of the statefulsets func getStatefulsetDetails(experimentsDetails *experimentTypes.ExperimentDetails) ([]experimentTypes.ApplicationUnderTest, error) { statefulsetList, err := appsv1StatefulsetClient.List(context.Background(), metav1.ListOptions{LabelSelector: experimentsDetails.AppLabel}) @@ -154,8 +159,8 @@ func getStatefulsetDetails(experimentsDetails *experimentTypes.ExperimentDetails return getSliceOfTotalApplicationsTargeted(appsUnderTest, experimentsDetails), nil } -//podAutoscalerChaosInDeployment scales up the replicas of deployment and verify the status -func podAutoscalerChaosInDeployment(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +// podAutoscalerChaosInDeployment scales up the replicas of deployment and verify the status +func podAutoscalerChaosInDeployment(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // Scale Application retryErr := retries.RetryOnConflict(retries.DefaultRetry, func() error { @@ -182,11 +187,11 @@ func podAutoscalerChaosInDeployment(experimentsDetails *experimentTypes.Experime } log.Info("[Info]: 
The application started scaling") - return deploymentStatusCheck(experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails) + return deploymentStatusCheck(ctx, experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails) } -//podAutoscalerChaosInStatefulset scales up the replicas of statefulset and verify the status -func podAutoscalerChaosInStatefulset(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +// podAutoscalerChaosInStatefulset scales up the replicas of statefulset and verify the status +func podAutoscalerChaosInStatefulset(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // Scale Application retryErr := retries.RetryOnConflict(retries.DefaultRetry, func() error { @@ -212,11 +217,11 @@ func podAutoscalerChaosInStatefulset(experimentsDetails *experimentTypes.Experim } log.Info("[Info]: The application started scaling") - return statefulsetStatusCheck(experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails) + return statefulsetStatusCheck(ctx, experimentsDetails, clients, appsUnderTest, resultDetails, eventsDetails, chaosDetails) } // deploymentStatusCheck check the status of deployment and verify the available replicas -func deploymentStatusCheck(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func deploymentStatusCheck(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { //ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin ChaosStartTimeStamp := time.Now() @@ -246,7 +251,7 @@ func deploymentStatusCheck(experimentsDetails *experimentTypes.ExperimentDetails // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err = probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -261,7 +266,7 @@ func deploymentStatusCheck(experimentsDetails *experimentTypes.ExperimentDetails } // statefulsetStatusCheck check the status of statefulset and verify the available replicas -func statefulsetStatusCheck(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func statefulsetStatusCheck(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { 
//ChaosStartTimeStamp contains the start timestamp, when the chaos injection begin ChaosStartTimeStamp := time.Now() @@ -291,7 +296,7 @@ func statefulsetStatusCheck(experimentsDetails *experimentTypes.ExperimentDetail // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err = probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -305,7 +310,7 @@ func statefulsetStatusCheck(experimentsDetails *experimentTypes.ExperimentDetail return nil } -//autoscalerRecoveryInDeployment rollback the replicas to initial values in deployment +// autoscalerRecoveryInDeployment rollback the replicas to initial values in deployment func autoscalerRecoveryInDeployment(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, chaosDetails *types.ChaosDetails) error { // Scale back to initial number of replicas @@ -351,7 +356,7 @@ func autoscalerRecoveryInDeployment(experimentsDetails *experimentTypes.Experime }) } -//autoscalerRecoveryInStatefulset rollback the replicas to initial values in deployment +// autoscalerRecoveryInStatefulset rollback the replicas to initial values in deployment func autoscalerRecoveryInStatefulset(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, appsUnderTest []experimentTypes.ApplicationUnderTest, chaosDetails *types.ChaosDetails) error { // Scale back to initial number of replicas @@ -399,7 +404,7 @@ func autoscalerRecoveryInStatefulset(experimentsDetails *experimentTypes.Experim func int32Ptr(i int32) *int32 { return &i } -//abortPodAutoScalerChaos go routine will continuously watch for the abort signal for the entire chaos duration and generate the required events and result +// abortPodAutoScalerChaos go routine will continuously watch for the abort signal for the entire chaos duration and generate the required events and result func abortPodAutoScalerChaos(appsUnderTest []experimentTypes.ApplicationUnderTest, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) { // signChan channel is used to transmit signal notifications. 
diff --git a/chaoslib/litmus/pod-cpu-hog-exec/lib/pod-cpu-hog-exec.go b/chaoslib/litmus/pod-cpu-hog-exec/lib/pod-cpu-hog-exec.go index 0e4c86caa..f28be58f7 100644 --- a/chaoslib/litmus/pod-cpu-hog-exec/lib/pod-cpu-hog-exec.go +++ b/chaoslib/litmus/pod-cpu-hog-exec/lib/pod-cpu-hog-exec.go @@ -1,16 +1,20 @@ package lib import ( + "context" "fmt" - "github.com/litmuschaos/litmus-go/pkg/cerrors" - "github.com/palantir/stacktrace" "os" "os/signal" "strings" "syscall" "time" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" + "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" + + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-cpu-hog-exec/types" "github.com/litmuschaos/litmus-go/pkg/log" @@ -25,9 +29,10 @@ import ( var inject chan os.Signal -//PrepareCPUExecStress contains the chaos preparation and injection steps -func PrepareCPUExecStress(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - +// PrepareCPUExecStress contains the chaos preparation and injection steps +func PrepareCPUExecStress(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PreparePodCPUHogExecFault") + defer span.End() // inject channel is used to transmit signal notifications. inject = make(chan os.Signal, 1) // Catch and relay certain signal(s) to inject channel. @@ -39,7 +44,7 @@ func PrepareCPUExecStress(experimentsDetails *experimentTypes.ExperimentDetails, common.WaitForDuration(experimentsDetails.RampTime) } //Starting the CPU stress experiment - if err := experimentCPU(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err := experimentCPU(ctx, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not stress cpu") } //Waiting for the ramp time after chaos injection @@ -54,7 +59,7 @@ func PrepareCPUExecStress(experimentsDetails *experimentTypes.ExperimentDetails, // The function will be constantly increasing the CPU utilisation until it reaches the maximum available or allowed number. 
// Using the TOTAL_CHAOS_DURATION we will need to specify for how long this experiment will last func stressCPU(experimentsDetails *experimentTypes.ExperimentDetails, podName, ns string, clients clients.ClientSets, stressErr chan error) { - // It will contains all the pod & container details required for exec command + // It will contain all the pod & container details required for exec command execCommandDetails := litmusexec.PodDetails{} command := []string{"/bin/sh", "-c", experimentsDetails.ChaosInjectCmd} litmusexec.SetExecCommandAttributes(&execCommandDetails, podName, experimentsDetails.TargetContainer, ns) @@ -62,9 +67,8 @@ func stressCPU(experimentsDetails *experimentTypes.ExperimentDetails, podName, n stressErr <- err } -//experimentCPU function orchestrates the experiment by calling the StressCPU function for every core, of every container, of every pod that is targeted -func experimentCPU(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - +// experimentCPU function orchestrates the experiment by calling the StressCPU function for every core, of every container, of every pod that is targeted +func experimentCPU(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // Get the target pod details for the chaos execution // if the target pod is not defined it will derive the random target pod list using pod affected percentage if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil { @@ -85,11 +89,11 @@ func experimentCPU(experimentsDetails *experimentTypes.ExperimentDetails, client experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != "" switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInSerialMode(ctx, experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInParallelMode(ctx, experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -100,11 +104,13 @@ func experimentCPU(experimentsDetails *experimentTypes.ExperimentDetails, client } // injectChaosInSerialMode stressed the cpu of all target application serially (one by one) -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, 
"InjectPodCPUHogExecFaultInSerialMode") + defer span.End() // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -197,13 +203,16 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // injectChaosInParallelMode stressed the cpu of all target application in parallel mode (all at once) -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectPodCPUHogExecFaultInParallelMode") + defer span.End() + // creating err channel to receive the error from the go routine stressErr := make(chan error) // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -287,7 +296,8 @@ loop: } // killStressCPUSerial function to kill a stress process running inside target container -// Triggered by either timeout of chaos duration or termination of the experiment +// +// Triggered by either timeout of chaos duration or termination of the experiment func killStressCPUSerial(experimentsDetails *experimentTypes.ExperimentDetails, podName, ns string, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error { // It will contain all the pod & container details required for exec command execCommandDetails := litmusexec.PodDetails{} diff --git a/chaoslib/litmus/pod-delete/lib/pod-delete.go b/chaoslib/litmus/pod-delete/lib/pod-delete.go index a513beb31..aa4fec6e8 100644 --- a/chaoslib/litmus/pod-delete/lib/pod-delete.go +++ b/chaoslib/litmus/pod-delete/lib/pod-delete.go @@ -8,23 +8,26 @@ import ( "time" "github.com/litmuschaos/litmus-go/pkg/cerrors" - "github.com/litmuschaos/litmus-go/pkg/workloads" - "github.com/palantir/stacktrace" - - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-delete/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" "github.com/litmuschaos/litmus-go/pkg/status" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" + "github.com/litmuschaos/litmus-go/pkg/workloads" + "github.com/palantir/stacktrace" "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// PreparePodDelete contains the prepration steps before chaos injection -func PreparePodDelete(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails 
*types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +// PreparePodDelete contains the preparation steps before chaos injection +func PreparePodDelete(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PreparePodDeleteFault") + defer span.End() //Waiting for the ramp time before chaos injection if experimentsDetails.RampTime != 0 { @@ -42,11 +45,11 @@ func PreparePodDelete(experimentsDetails *experimentTypes.ExperimentDetails, cli switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err := injectChaosInSerialMode(experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil { + if err := injectChaosInSerialMode(ctx, experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err := injectChaosInParallelMode(experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil { + if err := injectChaosInParallelMode(ctx, experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -62,11 +65,13 @@ func PreparePodDelete(experimentsDetails *experimentTypes.ExperimentDetails, cli } // injectChaosInSerialMode delete the target application pods serial mode(one by one) -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, eventsDetails *types.EventDetails, resultDetails *types.ResultDetails) error { +func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, eventsDetails *types.EventDetails, resultDetails *types.ResultDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectPodDeleteFaultInSerialMode") + defer span.End() // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -159,11 +164,13 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // injectChaosInParallelMode delete the target application pods in parallel mode (all at once) -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, eventsDetails *types.EventDetails, resultDetails *types.ResultDetails) error { +func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, eventsDetails *types.EventDetails, resultDetails *types.ResultDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectPodDeleteFaultInParallelMode") + defer span.End() // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := 
probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } diff --git a/chaoslib/litmus/pod-dns-chaos/helper/dnschaos.go b/chaoslib/litmus/pod-dns-chaos/helper/dnschaos.go index c80a5a1fc..04b895f9b 100644 --- a/chaoslib/litmus/pod-dns-chaos/helper/dnschaos.go +++ b/chaoslib/litmus/pod-dns-chaos/helper/dnschaos.go @@ -2,9 +2,12 @@ package helper import ( "bytes" + "context" "fmt" "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" "os" "os/exec" "os/signal" @@ -34,7 +37,9 @@ const ( ) // Helper injects the dns chaos -func Helper(clients clients.ClientSets) { +func Helper(ctx context.Context, clients clients.ClientSets) { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "SimulatePodDNSFault") + defer span.End() experimentsDetails := experimentTypes.ExperimentDetails{} eventsDetails := types.EventDetails{} @@ -74,7 +79,7 @@ func Helper(clients clients.ClientSets) { } -//preparePodDNSChaos contains the preparation steps before chaos injection +// preparePodDNSChaos contains the preparation steps before chaos injection func preparePodDNSChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails) error { targetList, err := common.ParseTargets(chaosDetails.ChaosPodName) @@ -264,7 +269,7 @@ func abortWatcher(targets []targetDetails, resultName, chaosNS string) { os.Exit(1) } -//getENV fetches all the env variables from the runner pod +// getENV fetches all the env variables from the runner pod func getENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "") experimentDetails.InstanceID = types.Getenv("INSTANCE_ID", "") diff --git a/chaoslib/litmus/pod-dns-chaos/lib/pod-dns-chaos.go b/chaoslib/litmus/pod-dns-chaos/lib/pod-dns-chaos.go index 3910a48f1..ce608e564 100644 --- a/chaoslib/litmus/pod-dns-chaos/lib/pod-dns-chaos.go +++ b/chaoslib/litmus/pod-dns-chaos/lib/pod-dns-chaos.go @@ -3,13 +3,16 @@ package lib import ( "context" "fmt" + "os" "strconv" "strings" "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-dns-chaos/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" @@ -23,9 +26,10 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -//PrepareAndInjectChaos contains the preparation & injection steps -func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - +// PrepareAndInjectChaos contains the preparation & injection steps +func PrepareAndInjectChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PreparePodDNSFault") + defer span.End() // Get the target pod details for the chaos execution // if the 
target pod is not defined it will derive the random target pod list using pod affected percentage if experimentsDetails.TargetPods == "" && chaosDetails.AppDetail == nil { @@ -65,11 +69,11 @@ func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != "" switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { + if err = injectChaosInSerialMode(ctx, experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { + if err = injectChaosInParallelMode(ctx, experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -80,11 +84,13 @@ func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails } // injectChaosInSerialMode inject the DNS Chaos in all target application serially (one by one) -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { +func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectPodDNSFaultInSerialMode") + defer span.End() // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -103,7 +109,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai "ContainerName": experimentsDetails.TargetContainer, }) runID := stringutils.GetRunID() - if err := createHelperPod(experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), pod.Spec.NodeName, runID); err != nil { + if err := createHelperPod(ctx, experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), pod.Spec.NodeName, runID); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } @@ -136,12 +142,14 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // injectChaosInParallelMode inject the DNS Chaos in all target application in parallel mode (all at once) -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { +func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, 
targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectPodDNSFaultInParallelMode") + defer span.End() var err error // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -155,7 +163,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet targetsPerNode = append(targetsPerNode, fmt.Sprintf("%s:%s:%s", k.Name, k.Namespace, k.TargetContainer)) } - if err := createHelperPod(experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID); err != nil { + if err := createHelperPod(ctx, experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } } @@ -192,8 +200,9 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet } // createHelperPod derive the attributes for helper pod and create the helper pod -func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, targets, nodeName, runID string) error { - +func createHelperPod(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, targets, nodeName, runID string) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "CreatePodDNSFaultHelperPod") + defer span.End() privilegedEnable := true terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds) @@ -235,7 +244,7 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie "./helpers -name dns-chaos", }, Resources: chaosDetails.Resources, - Env: getPodEnv(experimentsDetails, targets), + Env: getPodEnv(ctx, experimentsDetails, targets), VolumeMounts: []apiv1.VolumeMount{ { Name: "cri-socket", @@ -263,7 +272,7 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie } // getPodEnv derive all the env required for the helper pod -func getPodEnv(experimentsDetails *experimentTypes.ExperimentDetails, targets string) []apiv1.EnvVar { +func getPodEnv(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targets string) []apiv1.EnvVar { var envDetails common.ENVDetails envDetails.SetEnv("TARGETS", targets). @@ -279,6 +288,8 @@ func getPodEnv(experimentsDetails *experimentTypes.ExperimentDetails, targets st SetEnv("MATCH_SCHEME", experimentsDetails.MatchScheme). SetEnv("CHAOS_TYPE", experimentsDetails.ChaosType). SetEnv("INSTANCE_ID", experimentsDetails.InstanceID). + SetEnv("OTEL_EXPORTER_OTLP_ENDPOINT", os.Getenv(telemetry.OTELExporterOTLPEndpoint)). + SetEnv("TRACE_PARENT", telemetry.GetMarshalledSpanFromContext(ctx)). 
SetEnvFromDownwardAPI("v1", "metadata.name") return envDetails.ENV diff --git a/chaoslib/litmus/pod-fio-stress/lib/pod-fio-stress.go b/chaoslib/litmus/pod-fio-stress/lib/pod-fio-stress.go index a289bd5ba..56cecf6e1 100644 --- a/chaoslib/litmus/pod-fio-stress/lib/pod-fio-stress.go +++ b/chaoslib/litmus/pod-fio-stress/lib/pod-fio-stress.go @@ -1,6 +1,7 @@ package lib import ( + "context" "fmt" "os" "os/signal" @@ -10,9 +11,11 @@ import ( "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/result" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-fio-stress/types" "github.com/litmuschaos/litmus-go/pkg/log" @@ -24,8 +27,10 @@ import ( corev1 "k8s.io/api/core/v1" ) -//PrepareChaos contains the chaos preparation and injection steps -func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +// PrepareChaos contains the chaos preparation and injection steps +func PrepareChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PreparePodFIOStressFault") + defer span.End() //Waiting for the ramp time before chaos injection if experimentsDetails.RampTime != 0 { @@ -33,7 +38,7 @@ func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients common.WaitForDuration(experimentsDetails.RampTime) } //Starting the Fio stress experiment - if err := experimentExecution(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err := experimentExecution(ctx, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not inject chaos") } //Waiting for the ramp time after chaos injection @@ -66,8 +71,8 @@ func stressStorage(experimentDetails *experimentTypes.ExperimentDetails, podName stressErr <- err } -//experimentExecution function orchestrates the experiment by calling the StressStorage function, of every container, of every pod that is targeted -func experimentExecution(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +// experimentExecution function orchestrates the experiment by calling the StressStorage function, of every container, of every pod that is targeted +func experimentExecution(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // Get the target pod details for the chaos execution // if the target pod is not defined it will derive the random target pod list using pod affected percentage @@ -89,11 +94,11 @@ func experimentExecution(experimentsDetails *experimentTypes.ExperimentDetails, experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != "" switch 
strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInSerialMode(ctx, experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInParallelMode(ctx, experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -104,12 +109,15 @@ func experimentExecution(experimentsDetails *experimentTypes.ExperimentDetails, } // injectChaosInSerialMode stressed the storage of all target application in serial mode (one by one) -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectPodFIOStressFaultInSerialMode") + defer span.End() + // creating err channel to receive the error from the go routine stressErr := make(chan error) // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -185,12 +193,15 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // injectChaosInParallelMode stressed the storage of all target application in parallel mode (all at once) -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectPodFIOStressFaultInParallelMode") + defer span.End() + // creating err channel to receive the error from the go routine stressErr := make(chan error) // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -265,7 +276,8 @@ loop: } // killStressSerial function to kill a stress process running inside target container -// Triggered by either timeout of chaos duration or termination of the experiment +// +// 
Triggered by either timeout of chaos duration or termination of the experiment func killStressSerial(containerName, podName, namespace, KillCmd string, clients clients.ClientSets) error { // It will contain all the pod & container details required for exec command execCommandDetails := litmusexec.PodDetails{} diff --git a/chaoslib/litmus/pod-memory-hog-exec/lib/pod-memory-hog-exec.go b/chaoslib/litmus/pod-memory-hog-exec/lib/pod-memory-hog-exec.go index 18aec9a66..cbd9c0f4f 100644 --- a/chaoslib/litmus/pod-memory-hog-exec/lib/pod-memory-hog-exec.go +++ b/chaoslib/litmus/pod-memory-hog-exec/lib/pod-memory-hog-exec.go @@ -1,6 +1,7 @@ package lib import ( + "context" "fmt" "os" "os/signal" @@ -10,9 +11,11 @@ import ( "time" "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-memory-hog-exec/types" "github.com/litmuschaos/litmus-go/pkg/log" @@ -27,8 +30,10 @@ import ( var inject chan os.Signal -//PrepareMemoryExecStress contains the chaos preparation and injection steps -func PrepareMemoryExecStress(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +// PrepareMemoryExecStress contains the chaos preparation and injection steps +func PrepareMemoryExecStress(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PreparePodMemoryHogExecFault") + defer span.End() // inject channel is used to transmit signal notifications. 
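Every fault library touched in this diff follows the same instrumentation pattern: the exported entry point gains a context.Context parameter, opens a span named after the fault via otel.Tracer(telemetry.TracerName).Start, defers span.End(), and threads ctx into the serial/parallel injectors and probe.RunProbes so those calls show up as child spans. A minimal sketch of that pattern, assuming a TracerName constant like the one the telemetry package exports; the runFault wrapper and its error handling below are illustrative, not code from this PR:

```go
package lib

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/codes"
)

// TracerName mirrors the constant exposed by the telemetry package in this PR
// (the exact value here is an assumption for this sketch).
const TracerName = "litmuschaos.io/litmus-go"

// runFault shows the wrapping pattern used by the Prepare* functions above:
// derive a child context carrying the span, defer its end, and pass ctx on so
// nested calls (probes, helper-pod creation) become child spans.
func runFault(ctx context.Context, inject func(context.Context) error) error {
	ctx, span := otel.Tracer(TracerName).Start(ctx, "PrepareExampleFault")
	defer span.End()

	if err := inject(ctx); err != nil {
		// record the failure on the span so the trace shows which step broke
		span.RecordError(err)
		span.SetStatus(codes.Error, err.Error())
		return err
	}
	return nil
}
```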
inject = make(chan os.Signal, 1) @@ -41,7 +46,7 @@ func PrepareMemoryExecStress(experimentsDetails *experimentTypes.ExperimentDetai common.WaitForDuration(experimentsDetails.RampTime) } //Starting the Memory stress experiment - if err := experimentMemory(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err := experimentMemory(ctx, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not stress memory") } //Waiting for the ramp time after chaos injection @@ -71,8 +76,8 @@ func stressMemory(MemoryConsumption, containerName, podName, namespace string, c stressErr <- err } -//experimentMemory function orchestrates the experiment by calling the StressMemory function, of every container, of every pod that is targeted -func experimentMemory(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +// experimentMemory function orchestrates the experiment by calling the StressMemory function, of every container, of every pod that is targeted +func experimentMemory(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // Get the target pod details for the chaos execution // if the target pod is not defined it will derive the random target pod list using pod affected percentage @@ -94,11 +99,11 @@ func experimentMemory(experimentsDetails *experimentTypes.ExperimentDetails, cli experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != "" switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInSerialMode(ctx, experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInParallelMode(ctx, experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -109,11 +114,13 @@ func experimentMemory(experimentsDetails *experimentTypes.ExperimentDetails, cli } // injectChaosInSerialMode stressed the memory of all target application serially (one by one) -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectPodMemoryHogExecFaultInSerialMode") + defer span.End() // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := 
probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -203,12 +210,15 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // injectChaosInParallelMode stressed the memory of all target application in parallel mode (all at once) -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectPodMemoryHogExecFaultInParallelMode") + defer span.End() + // creating err channel to receive the error from the go routine stressErr := make(chan error) // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -291,7 +301,8 @@ loop: } // killStressMemorySerial function to kill a stress process running inside target container -// Triggered by either timeout of chaos duration or termination of the experiment +// +// Triggered by either timeout of chaos duration or termination of the experiment func killStressMemorySerial(containerName, podName, namespace, memFreeCmd string, clients clients.ClientSets, chaosDetails *types.ChaosDetails) error { // It will contains all the pod & container details required for exec command execCommandDetails := litmusexec.PodDetails{} diff --git a/chaoslib/litmus/pod-network-partition/lib/pod-network-partition.go b/chaoslib/litmus/pod-network-partition/lib/pod-network-partition.go index 7fb31b04b..ac1b37717 100644 --- a/chaoslib/litmus/pod-network-partition/lib/pod-network-partition.go +++ b/chaoslib/litmus/pod-network-partition/lib/pod-network-partition.go @@ -10,7 +10,9 @@ import ( "time" "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-network-partition/types" @@ -31,8 +33,10 @@ var ( inject, abort chan os.Signal ) -//PrepareAndInjectChaos contains the prepration & injection steps -func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +// PrepareAndInjectChaos contains the prepration & injection steps +func PrepareAndInjectChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PreparePodNetworkPartitionFault") + defer span.End() // inject channel is used to transmit 
signal notifications. inject = make(chan os.Signal, 1) @@ -91,7 +95,7 @@ func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -102,7 +106,7 @@ func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails os.Exit(0) default: // creating the network policy to block the traffic - if err := createNetworkPolicy(experimentsDetails, clients, np, runID); err != nil { + if err := createNetworkPolicy(ctx, experimentsDetails, clients, np, runID); err != nil { return stacktrace.Propagate(err, "could not create network policy") } // updating chaos status to injected for the target pods @@ -140,7 +144,9 @@ func PrepareAndInjectChaos(experimentsDetails *experimentTypes.ExperimentDetails // createNetworkPolicy creates the network policy in the application namespace // it blocks ingress/egress traffic for the targeted application for specific/all IPs -func createNetworkPolicy(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, networkPolicy *NetworkPolicy, runID string) error { +func createNetworkPolicy(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, networkPolicy *NetworkPolicy, runID string) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectPodNetworkPartitionFault") + defer span.End() np := &networkv1.NetworkPolicy{ ObjectMeta: v1.ObjectMeta{ diff --git a/chaoslib/litmus/redfish-node-restart/lib/redfish-node-restart.go b/chaoslib/litmus/redfish-node-restart/lib/redfish-node-restart.go index 659179b55..61fa9c44d 100644 --- a/chaoslib/litmus/redfish-node-restart/lib/redfish-node-restart.go +++ b/chaoslib/litmus/redfish-node-restart/lib/redfish-node-restart.go @@ -1,32 +1,38 @@ package lib import ( + "context" "fmt" "time" redfishLib "github.com/litmuschaos/litmus-go/pkg/baremetal/redfish" experimentTypes "github.com/litmuschaos/litmus-go/pkg/baremetal/redfish-node-restart/types" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" ) // injectChaos initiates node restart chaos on the target node -func injectChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) error { +func injectChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectRedfishNodeRestartFault") + defer span.End() + URL := fmt.Sprintf("https://%v/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset", experimentsDetails.IPMIIP) return redfishLib.RebootNode(URL, experimentsDetails.User, experimentsDetails.Password) } // experimentExecution function orchestrates the experiment by calling the injectChaos function -func experimentExecution(experimentsDetails *experimentTypes.ExperimentDetails, clients 
clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func experimentExecution(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -37,7 +43,7 @@ func experimentExecution(experimentsDetails *experimentTypes.ExperimentDetails, events.GenerateEvents(eventsDetails, clients, chaosDetails, "ChaosEngine") } - if err := injectChaos(experimentsDetails, clients); err != nil { + if err := injectChaos(ctx, experimentsDetails, clients); err != nil { return stacktrace.Propagate(err, "chaos injection failed") } @@ -47,7 +53,9 @@ func experimentExecution(experimentsDetails *experimentTypes.ExperimentDetails, } // PrepareChaos contains the chaos prepration and injection steps -func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PrepareChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareRedfishNodeRestartFault") + defer span.End() //Waiting for the ramp time before chaos injection if experimentsDetails.RampTime != 0 { @@ -55,7 +63,7 @@ func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients common.WaitForDuration(experimentsDetails.RampTime) } //Starting the Redfish node restart experiment - if err := experimentExecution(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err := experimentExecution(ctx, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return err } common.SetTargets(experimentsDetails.IPMIIP, "targeted", "node", chaosDetails) diff --git a/chaoslib/litmus/spring-boot-chaos/lib/spring-boot-chaos.go b/chaoslib/litmus/spring-boot-chaos/lib/spring-boot-chaos.go index cb5bbb485..6980608a6 100644 --- a/chaoslib/litmus/spring-boot-chaos/lib/spring-boot-chaos.go +++ b/chaoslib/litmus/spring-boot-chaos/lib/spring-boot-chaos.go @@ -2,6 +2,7 @@ package lib import ( "bytes" + "context" "encoding/json" "fmt" "net/http" @@ -12,7 +13,9 @@ import ( "time" "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" corev1 "k8s.io/api/core/v1" "github.com/litmuschaos/litmus-go/pkg/clients" @@ -51,7 +54,10 @@ func SetTargetPodList(experimentsDetails *experimentTypes.ExperimentDetails, cli } // PrepareChaos contains the preparation steps before chaos injection -func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PrepareChaos(ctx context.Context, experimentsDetails 
*experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareSpringBootFault") + defer span.End() + // Waiting for the ramp time before chaos injection if experimentsDetails.RampTime != 0 { log.Infof("[Ramp]: Waiting for the %vs ramp time before injecting chaos", experimentsDetails.RampTime) @@ -69,11 +75,11 @@ func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err := injectChaosInSerialMode(experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil { + if err := injectChaosInSerialMode(ctx, experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err := injectChaosInParallelMode(experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil { + if err := injectChaosInParallelMode(ctx, experimentsDetails, clients, chaosDetails, eventsDetails, resultDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -187,7 +193,7 @@ func setChaosMonkeyAssault(chaosMonkeyPort string, chaosMonkeyPath string, assau } // disableChaosMonkey disables chaos monkey on selected pods -func disableChaosMonkey(chaosMonkeyPort string, chaosMonkeyPath string, pod corev1.Pod) error { +func disableChaosMonkey(ctx context.Context, chaosMonkeyPort string, chaosMonkeyPath string, pod corev1.Pod) error { log.Infof("[Chaos]: disabling assaults on pod %s", pod.Name) jsonValue, err := json.Marshal(revertAssault) if err != nil { @@ -211,11 +217,13 @@ func disableChaosMonkey(chaosMonkeyPort string, chaosMonkeyPath string, pod core } // injectChaosInSerialMode injects chaos monkey assault on pods in serial mode(one by one) -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, eventsDetails *types.EventDetails, resultDetails *types.ResultDetails) error { +func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, eventsDetails *types.EventDetails, resultDetails *types.ResultDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectSpringBootFaultInSerialMode") + defer span.End() // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -269,7 +277,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai select { case <-signChan: log.Info("[Chaos]: Revert Started") - if err := disableChaosMonkey(experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, pod); err != nil { + if err := disableChaosMonkey(ctx, experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, pod); err != nil { log.Errorf("Error in disabling chaos monkey, err: %v", err) } else { common.SetTargets(pod.Name, "reverted", "pod", chaosDetails) @@ -287,7 +295,7 @@ func injectChaosInSerialMode(experimentsDetails 
*experimentTypes.ExperimentDetai } } - if err := disableChaosMonkey(experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, pod); err != nil { + if err := disableChaosMonkey(ctx, experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, pod); err != nil { return err } @@ -299,11 +307,13 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // injectChaosInParallelMode injects chaos monkey assault on pods in parallel mode (all at once) -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, eventsDetails *types.EventDetails, resultDetails *types.ResultDetails) error { +func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, eventsDetails *types.EventDetails, resultDetails *types.ResultDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectSpringBootFaultInParallelMode") + defer span.End() // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -358,7 +368,7 @@ loop: case <-signChan: log.Info("[Chaos]: Revert Started") for _, pod := range experimentsDetails.TargetPodList.Items { - if err := disableChaosMonkey(experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, pod); err != nil { + if err := disableChaosMonkey(ctx, experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, pod); err != nil { log.Errorf("Error in disabling chaos monkey, err: %v", err) } else { common.SetTargets(pod.Name, "reverted", "pod", chaosDetails) @@ -379,7 +389,7 @@ loop: var errorList []string for _, pod := range experimentsDetails.TargetPodList.Items { - if err := disableChaosMonkey(experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, pod); err != nil { + if err := disableChaosMonkey(ctx, experimentsDetails.ChaosMonkeyPort, experimentsDetails.ChaosMonkeyPath, pod); err != nil { errorList = append(errorList, err.Error()) continue } diff --git a/chaoslib/litmus/stress-chaos/helper/stress-helper.go b/chaoslib/litmus/stress-chaos/helper/stress-helper.go index c353c3eb5..c2f62bcc2 100644 --- a/chaoslib/litmus/stress-chaos/helper/stress-helper.go +++ b/chaoslib/litmus/stress-chaos/helper/stress-helper.go @@ -3,9 +3,12 @@ package helper import ( "bufio" "bytes" + "context" "fmt" "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" "io" "os" "os/exec" @@ -51,7 +54,9 @@ const ( ) // Helper injects the stress chaos -func Helper(clients clients.ClientSets) { +func Helper(ctx context.Context, clients clients.ClientSets) { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "SimulatePodStressFault") + defer span.End() experimentsDetails := experimentTypes.ExperimentDetails{} eventsDetails := types.EventDetails{} @@ -107,7 +112,6 @@ func prepareStressChaos(experimentsDetails *experimentTypes.ExperimentDetails, c var ( targets []targetDetails - groupPath = "" ) for _, t := range targetList.Target { @@ -129,7 +133,7 @@ func prepareStressChaos(experimentsDetails *experimentTypes.ExperimentDetails, c return 
stacktrace.Propagate(err, "could not get container pid") } - td.CGroupManager, err, groupPath = getCGroupManager(td) + td.CGroupManager, err, td.GroupPath = getCGroupManager(td) if err != nil { return stacktrace.Propagate(err, "could not get cgroup manager") } @@ -149,7 +153,7 @@ func prepareStressChaos(experimentsDetails *experimentTypes.ExperimentDetails, c done := make(chan error, 1) for index, t := range targets { - targets[index].Cmd, err = injectChaos(t, stressors, groupPath) + targets[index].Cmd, err = injectChaos(t, stressors, experimentsDetails.StressType) if err != nil { return stacktrace.Propagate(err, "could not inject chaos") } @@ -499,18 +503,8 @@ func abortWatcher(targets []targetDetails, resultName, chaosNS string) { // getCGroupManager will return the cgroup for the given pid of the process func getCGroupManager(t targetDetails) (interface{}, error, string) { if cgroups.Mode() == cgroups.Unified { - //groupPath, err := cgroupsv2.PidGroupPath(t.Pid) - //if err != nil { - // return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("fail to get pid group path: %s", err.Error())} - //} - // - //cgroup2, err := cgroupsv2.LoadManager("/sys/fs/cgroup", groupPath) - //if err != nil { - // return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeHelper, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("fail to load the cgroup: %s", err.Error())} - //} - //return cgroup2, nil groupPath := "" - output, err := exec.Command("bash", "-c", fmt.Sprintf("nsenter -t 1 -C -m -- cat /proc/%v/cgroup", t.Pid)).CombinedOutput() + output, err := exec.Command("bash", "-c", fmt.Sprintf("nsenter -t 1 -C -m -- cat /proc/%v/cgroup", t.Pids[index])).CombinedOutput() if err != nil { return nil, errors.Errorf("Error in getting groupPath,%s", string(output)), "" } @@ -521,6 +515,7 @@ func getCGroupManager(t targetDetails) (interface{}, error, string) { if parts[0] == "0" && parts[1] == "" { groupPath = parts[2] } + log.Infof("group path: %s", groupPath) cgroup2, err := cgroupsv2.LoadManager("/sys/fs/cgroup", string(groupPath)) @@ -546,14 +541,13 @@ func getCGroupManager(t targetDetails) (interface{}, error, string) { // By default it will add to v1 cgroup func addProcessToCgroup(pid int, control interface{}, groupPath string) error { if cgroups.Mode() == cgroups.Unified { - //var cgroup1 = control.(*cgroupsv2.Manager) - //return cgroup1.AddProc(uint64(pid)) - args := []string{"-t", "1", "-C", "--", "sudo", "sh", "-c", - fmt.Sprintf("echo %d >> /sys/fs/cgroup%s/cgroup.procs", pid, strings.ReplaceAll(groupPath, "\n", ""))} + args := []string{"-t", "1", "-C", "--", "sudo", "sh", "-c", fmt.Sprintf("echo %d >> /sys/fs/cgroup%s/cgroup.procs", pid, strings.ReplaceAll(groupPath, "\n", ""))} output, err := exec.Command("nsenter", args...).CombinedOutput() if err != nil { - logrus.Error(string(output)) - return err + return cerrors.Error{ + ErrorCode: cerrors.ErrorTypeChaosInject, + Reason: fmt.Sprintf("failed to add process to cgroup %s: %v", string(output), err), + } } return nil } @@ -561,8 +555,15 @@ func addProcessToCgroup(pid int, control interface{}, groupPath string) error { return cgroup1.Add(cgroups.Process{Pid: pid}) } -func injectChaos(t targetDetails, stressors, groupPath string) (*exec.Cmd, error) { - stressCommand := "pause nsutil -t " + strconv.Itoa(t.Pid) + 
" -p -- " + stressors + +func injectChaos(t targetDetails, stressors, stressType string) (*exec.Cmd, error) { + stressCommand := fmt.Sprintf("pause nsutil -t %v -p -- %v", strconv.Itoa(t.Pid), stressors) + // for io stress,we need to enter into mount ns of the target container + // enabling it by passing -m flag + if stressType == "pod-io-stress" { + stressCommand = fmt.Sprintf("pause nsutil -t %v -p -m -- %v", strconv.Itoa(t.Pid), stressors) + } + log.Infof("[Info]: starting process: %v", stressCommand) // launch the stress-ng process on the target container in paused mode @@ -576,7 +577,7 @@ func injectChaos(t targetDetails, stressors, groupPath string) (*exec.Cmd, error } // add the stress process to the cgroup of target container - if err = addProcessToCgroup(cmd.Process.Pid, t.CGroupManager, groupPath); err != nil { + if err = addProcessToCgroup(cmd.Process.Pid, t.CGroupManager, t.GroupPath); err != nil { if killErr := cmd.Process.Kill(); killErr != nil { return nil, cerrors.Error{ErrorCode: cerrors.ErrorTypeChaosInject, Source: t.Source, Target: fmt.Sprintf("{podName: %s, namespace: %s, container: %s}", t.Name, t.Namespace, t.TargetContainer), Reason: fmt.Sprintf("fail to add the stress process to cgroup %s and kill stress process: %s", err.Error(), killErr.Error())} } @@ -604,4 +605,5 @@ type targetDetails struct { CGroupManager interface{} Cmd *exec.Cmd Source string + GroupPath string } diff --git a/chaoslib/litmus/stress-chaos/lib/stress-chaos.go b/chaoslib/litmus/stress-chaos/lib/stress-chaos.go index 88df25491..bfa6c878c 100644 --- a/chaoslib/litmus/stress-chaos/lib/stress-chaos.go +++ b/chaoslib/litmus/stress-chaos/lib/stress-chaos.go @@ -3,13 +3,16 @@ package lib import ( "context" "fmt" + "os" "strconv" "strings" "github.com/litmuschaos/litmus-go/pkg/cerrors" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/stress-chaos/types" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" @@ -22,9 +25,10 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -//PrepareAndInjectStressChaos contains the prepration & injection steps for the stress experiments. -func PrepareAndInjectStressChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { - +// PrepareAndInjectStressChaos contains the prepration & injection steps for the stress experiments. 
+func PrepareAndInjectStressChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PreparePodStressFault") + defer span.End() var err error //Set up the tunables if provided in range SetChaosTunables(experimentsDetails) @@ -89,11 +93,11 @@ func PrepareAndInjectStressChaos(experimentsDetails *experimentTypes.ExperimentD experimentsDetails.IsTargetContainerProvided = experimentsDetails.TargetContainer != "" switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = injectChaosInSerialMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { + if err = injectChaosInSerialMode(ctx, experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = injectChaosInParallelMode(experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { + if err = injectChaosInParallelMode(ctx, experimentsDetails, targetPodList, clients, chaosDetails, resultDetails, eventsDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -104,11 +108,13 @@ func PrepareAndInjectStressChaos(experimentsDetails *experimentTypes.ExperimentD } // injectChaosInSerialMode inject the stress chaos in all target application serially (one by one) -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { +func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectPodStressFaultInSerialMode") + defer span.End() // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -127,7 +133,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai "ContainerName": experimentsDetails.TargetContainer, }) runID := stringutils.GetRunID() - if err := createHelperPod(experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), pod.Spec.NodeName, runID); err != nil { + if err := createHelperPod(ctx, experimentsDetails, clients, chaosDetails, fmt.Sprintf("%s:%s:%s", pod.Name, pod.Namespace, experimentsDetails.TargetContainer), pod.Spec.NodeName, runID); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } @@ -160,12 +166,14 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // injectChaosInParallelMode inject the stress chaos in all target application in parallel mode (all at once) -func injectChaosInParallelMode(experimentsDetails 
*experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { +func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetPodList apiv1.PodList, clients clients.ClientSets, chaosDetails *types.ChaosDetails, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectPodStressFaultInParallelMode") + defer span.End() var err error // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -179,7 +187,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet targetsPerNode = append(targetsPerNode, fmt.Sprintf("%s:%s:%s", k.Name, k.Namespace, k.TargetContainer)) } - if err := createHelperPod(experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID); err != nil { + if err := createHelperPod(ctx, experimentsDetails, clients, chaosDetails, strings.Join(targetsPerNode, ";"), node, runID); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } } @@ -213,7 +221,9 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet } // createHelperPod derive the attributes for helper pod and create the helper pod -func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, targets, nodeName, runID string) error { +func createHelperPod(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, targets, nodeName, runID string) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "CreatePodStressFaultHelperPod") + defer span.End() privilegedEnable := true terminationGracePeriodSeconds := int64(experimentsDetails.TerminationGracePeriodSeconds) @@ -265,7 +275,7 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie "./helpers -name stress-chaos", }, Resources: chaosDetails.Resources, - Env: getPodEnv(experimentsDetails, targets), + Env: getPodEnv(ctx, experimentsDetails, targets), VolumeMounts: []apiv1.VolumeMount{ { Name: "socket-path", @@ -303,7 +313,7 @@ func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clie } // getPodEnv derive all the env required for the helper pod -func getPodEnv(experimentsDetails *experimentTypes.ExperimentDetails, targets string) []apiv1.EnvVar { +func getPodEnv(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targets string) []apiv1.EnvVar { var envDetails common.ENVDetails envDetails.SetEnv("TARGETS", targets). @@ -323,6 +333,8 @@ func getPodEnv(experimentsDetails *experimentTypes.ExperimentDetails, targets st SetEnv("VOLUME_MOUNT_PATH", experimentsDetails.VolumeMountPath). SetEnv("STRESS_TYPE", experimentsDetails.StressType). SetEnv("INSTANCE_ID", experimentsDetails.InstanceID). + SetEnv("OTEL_EXPORTER_OTLP_ENDPOINT", os.Getenv(telemetry.OTELExporterOTLPEndpoint)). + SetEnv("TRACE_PARENT", telemetry.GetMarshalledSpanFromContext(ctx)). 
SetEnvFromDownwardAPI("v1", "metadata.name") return envDetails.ENV @@ -332,8 +344,8 @@ func ptrint64(p int64) *int64 { return &p } -//SetChaosTunables will set up a random value within a given range of values -//If the value is not provided in range it'll set up the initial provided value. +// SetChaosTunables will set up a random value within a given range of values +// If the value is not provided in range it'll set up the initial provided value. func SetChaosTunables(experimentsDetails *experimentTypes.ExperimentDetails) { experimentsDetails.CPUcores = common.ValidateRange(experimentsDetails.CPUcores) experimentsDetails.CPULoad = common.ValidateRange(experimentsDetails.CPULoad) diff --git a/chaoslib/litmus/vm-poweroff/lib/vm-poweroff.go b/chaoslib/litmus/vm-poweroff/lib/vm-poweroff.go index e30557880..e828be326 100644 --- a/chaoslib/litmus/vm-poweroff/lib/vm-poweroff.go +++ b/chaoslib/litmus/vm-poweroff/lib/vm-poweroff.go @@ -1,6 +1,7 @@ package lib import ( + "context" "fmt" "os" "os/signal" @@ -9,22 +10,25 @@ import ( "time" "github.com/litmuschaos/litmus-go/pkg/cerrors" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/cloud/vmware" "github.com/litmuschaos/litmus-go/pkg/events" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" experimentTypes "github.com/litmuschaos/litmus-go/pkg/vmware/vm-poweroff/types" "github.com/palantir/stacktrace" + "go.opentelemetry.io/otel" ) var inject, abort chan os.Signal // InjectVMPowerOffChaos injects the chaos in serial or parallel mode -func InjectVMPowerOffChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, cookie string) error { - +func InjectVMPowerOffChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails, cookie string) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareVMPowerOffFault") + defer span.End() // inject channel is used to transmit signal notifications. inject = make(chan os.Signal, 1) // Catch and relay certain signal(s) to inject channel. 
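Aside for reviewers: the hunks above and below all apply the same propagation pattern, so a condensed sketch may be easier to follow than any single hunk. This is a hypothetical illustration, not code from this PR; prepareFault and injectInSerialMode are made-up names, while otel.Tracer, telemetry.TracerName, telemetry.OTELExporterOTLPEndpoint, and telemetry.GetMarshalledSpanFromContext are used here exactly as they appear in the changes above.

package lib

import (
	"context"
	"os"

	"github.com/litmuschaos/litmus-go/pkg/telemetry"
	"go.opentelemetry.io/otel"
)

// prepareFault shows the shape every fault entry point takes after this change:
// it accepts a context, opens a named span on the shared tracer, defers
// span.End(), and threads the derived ctx through all nested calls.
func prepareFault(ctx context.Context) error {
	ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "PrepareExampleFault")
	defer span.End()

	// nested helpers receive the same ctx, so their spans become children
	return injectInSerialMode(ctx)
}

// injectInSerialMode shows how the active span is handed over to a helper pod:
// the OTLP endpoint and the serialized span context are read here and exported
// as env vars, mirroring the OTEL_EXPORTER_OTLP_ENDPOINT and TRACE_PARENT
// entries added to getPodEnv above.
func injectInSerialMode(ctx context.Context) error {
	ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectExampleFaultInSerialMode")
	defer span.End()

	endpoint := os.Getenv(telemetry.OTELExporterOTLPEndpoint)  // forwarded to the helper pod
	traceParent := telemetry.GetMarshalledSpanFromContext(ctx) // becomes the helper's TRACE_PARENT
	_, _ = endpoint, traceParent
	return nil
}

Threading the context explicitly, rather than keeping a package-level span, appears to be what lets the helper-pod spans attach as children of the experiment span once the helper rehydrates TRACE_PARENT.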
@@ -49,11 +53,11 @@ func InjectVMPowerOffChaos(experimentsDetails *experimentTypes.ExperimentDetails switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err := injectChaosInSerialMode(experimentsDetails, vmIdList, cookie, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err := injectChaosInSerialMode(ctx, experimentsDetails, vmIdList, cookie, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err := injectChaosInParallelMode(experimentsDetails, vmIdList, cookie, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err := injectChaosInParallelMode(ctx, experimentsDetails, vmIdList, cookie, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -70,7 +74,10 @@ func InjectVMPowerOffChaos(experimentsDetails *experimentTypes.ExperimentDetails } // injectChaosInSerialMode stops VMs in serial mode i.e. one after the other -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, vmIdList []string, cookie string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, vmIdList []string, cookie string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "injectVMPowerOffFaultInSerialMode") + defer span.End() + select { case <-inject: // stopping the chaos execution, if abort signal received @@ -109,7 +116,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai //Run the probes during the chaos //The OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration if len(resultDetails.ProbeDetails) != 0 && i == 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return stacktrace.Propagate(err, "failed to run probes") } } @@ -141,7 +148,9 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // injectChaosInParallelMode stops VMs in parallel mode i.e. 
all at once -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, vmIdList []string, cookie string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, vmIdList []string, cookie string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "injectVMPowerOffFaultInParallelMode") + defer span.End() select { case <-inject: @@ -184,7 +193,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet //Running the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return stacktrace.Propagate(err, "failed to run probes") } } diff --git a/contribute/developer-guide/templates/chaoslib_exec.tmpl b/contribute/developer-guide/templates/chaoslib_exec.tmpl index e4e3afcb2..a5a6d5704 100644 --- a/contribute/developer-guide/templates/chaoslib_exec.tmpl +++ b/contribute/developer-guide/templates/chaoslib_exec.tmpl @@ -1,6 +1,7 @@ package lib import ( + "context" "fmt" "os" "github.com/litmuschaos/litmus-go/pkg/cerrors" @@ -32,7 +33,7 @@ func injectChaos(experimentsDetails *experimentTypes.ExperimentDetails, podName return nil } -func experimentExecution(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func experimentExecution(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // Get the target pod details for the chaos execution // if the target pod is not defined it will derive the random target pod list using pod affected percentage @@ -53,13 +54,13 @@ func experimentExecution(experimentsDetails *experimentTypes.ExperimentDetails, } log.Infof("Target pods list for chaos, %v", podNames) - return runChaos(experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails) + return runChaos(ctx, experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails) } -func runChaos(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func runChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -120,7 +121,10 @@ func runChaos(experimentsDetails 
*experimentTypes.ExperimentDetails, targetPodLi } //PrepareChaos contains the preparation steps before chaos injection -func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PrepareChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + // @TODO: setup tracing + // ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "InjectChaos") + // defer span.End() //Waiting for the ramp time before chaos injection if experimentsDetails.RampTime != 0 { @@ -128,7 +132,7 @@ func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients common.WaitForDuration(experimentsDetails.RampTime) } //Starting the CPU stress experiment - if err := experimentExecution(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails);err != nil { + if err := experimentExecution(ctx, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails);err != nil { return stacktrace.Propagate(err, "could not execute experiment") } //Waiting for the ramp time after chaos injection diff --git a/contribute/developer-guide/templates/chaoslib_helper.tmpl b/contribute/developer-guide/templates/chaoslib_helper.tmpl index 290e2b557..92b091f07 100644 --- a/contribute/developer-guide/templates/chaoslib_helper.tmpl +++ b/contribute/developer-guide/templates/chaoslib_helper.tmpl @@ -19,7 +19,7 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func experimentExecution(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func experimentExecution(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { // Get the target pod details for the chaos execution // if the target pod is not defined it will derive the random target pod list using pod affected percentage @@ -54,10 +54,10 @@ func experimentExecution(experimentsDetails *experimentTypes.ExperimentDetails, } } - return runChaos(experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails) + return runChaos(ctx, experimentsDetails, targetPodList, clients, resultDetails, eventsDetails, chaosDetails) } -func runChaos(experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func runChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetPodList corev1.PodList, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { if experimentsDetails.EngineName != "" { msg := "Injecting " + experimentsDetails.ExperimentName + " chaos on target pod" types.SetEngineEventAttributes(eventsDetails, types.ChaosInject, msg, "Normal", chaosDetails) @@ -66,7 +66,7 @@ func runChaos(experimentsDetails *experimentTypes.ExperimentDetails, targetPodLi // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := 
probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -89,7 +89,7 @@ func runChaos(experimentsDetails *experimentTypes.ExperimentDetails, targetPodLi "Target Container": experimentsDetails.TargetContainer, }) - if err := createHelperPod(experimentsDetails, clients, chaosDetails, pod.Name, pod.Spec.NodeName, runID); err != nil { + if err := createHelperPod(ctx, experimentsDetails, clients, chaosDetails, pod.Name, pod.Spec.NodeName, runID); err != nil { return stacktrace.Propagate(err, "could not create helper pod") } @@ -124,7 +124,10 @@ func runChaos(experimentsDetails *experimentTypes.ExperimentDetails, targetPodLi } //PrepareChaos contains the preparation steps before chaos injection -func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func PrepareChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + // @TODO: setup tracing + // ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "Prepare[name-your-chaos]Fault") + // defer span.End() //Waiting for the ramp time before chaos injection if experimentsDetails.RampTime != 0 { @@ -132,7 +135,7 @@ func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients common.WaitForDuration(experimentsDetails.RampTime) } //Starting the CPU stress experiment - if err := experimentExecution(experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails);err != nil { + if err := experimentExecution(ctx, experimentsDetails, clients, resultDetails, eventsDetails, chaosDetails);err != nil { return stacktrace.Propagate(err, "could not execute chaos") } //Waiting for the ramp time after chaos injection @@ -144,7 +147,10 @@ func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients } // createHelperPod derive the attributes for helper pod and create the helper pod -func createHelperPod(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, targets, nodeName, runID string) error { +func createHelperPod(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, targets, nodeName, runID string) error { + // @TODO: setup tracing + // ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "Create[name-your-chaos]FaultHelperPod") + // defer span.End() helperPod := &corev1.Pod{ ObjectMeta: v1.ObjectMeta{ diff --git a/contribute/developer-guide/templates/chaoslib_non-k8s.tmpl b/contribute/developer-guide/templates/chaoslib_non-k8s.tmpl index 3b2954aaa..717d39438 100644 --- a/contribute/developer-guide/templates/chaoslib_non-k8s.tmpl +++ b/contribute/developer-guide/templates/chaoslib_non-k8s.tmpl @@ -1,6 +1,7 @@ package lib import ( + "context" "os" "os/signal" "strings" @@ -22,7 +23,10 @@ var ( ) //PrepareChaos contains the preparation and injection steps for the experiment -func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails 
*types.ChaosDetails) error { +func PrepareChaos(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + // @TODO: setup tracing + // ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "Prepare[name-your-chaos]Fault") + // defer span.End() // inject channel is used to transmit signal notifications. inject = make(chan os.Signal, 1) @@ -53,11 +57,11 @@ func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients switch strings.ToLower(experimentsDetails.Sequence) { case "serial": - if err = injectChaosInSerialMode(experimentsDetails, targetIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInSerialMode(ctx, experimentsDetails, targetIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in serial mode") } case "parallel": - if err = injectChaosInParallelMode(experimentsDetails, targetIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { + if err = injectChaosInParallelMode(ctx, experimentsDetails, targetIDList, clients, resultDetails, eventsDetails, chaosDetails); err != nil { return stacktrace.Propagate(err, "could not run chaos in parallel mode") } default: @@ -73,7 +77,10 @@ func PrepareChaos(experimentsDetails *experimentTypes.ExperimentDetails, clients } //injectChaosInSerialMode will inject the chaos on the target one after other -func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetails, targetIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInSerialMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + // @TODO: setup tracing + // ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "Inject[name-your-chaos]FaultInSerialMode") + // defer span.End() select { case <-inject: @@ -111,7 +118,7 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai // The OnChaos probes execution will start in the first iteration and keep running for the entire chaos duration if len(resultDetails.ProbeDetails) != 0 && i == 0 { - if err = probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err = probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } @@ -136,7 +143,10 @@ func injectChaosInSerialMode(experimentsDetails *experimentTypes.ExperimentDetai } // injectChaosInParallelMode will inject the chaos on the target all at once -func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDetails, targetIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { +func injectChaosInParallelMode(ctx context.Context, experimentsDetails *experimentTypes.ExperimentDetails, targetIDList []string, clients clients.ClientSets, resultDetails *types.ResultDetails, eventsDetails *types.EventDetails, chaosDetails *types.ChaosDetails) error { + // @TODO: setup tracing + // ctx, span :=
otel.Tracer(telemetry.TracerName).Start(ctx, "Inject[name-your-chaos]FaultInParallelMode") + // defer span.End() select { case <-inject: @@ -177,7 +187,7 @@ func injectChaosInParallelMode(experimentsDetails *experimentTypes.ExperimentDet // run the probes during chaos if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { + if err := probe.RunProbes(ctx, chaosDetails, clients, resultDetails, "DuringChaos", eventsDetails); err != nil { return err } } diff --git a/contribute/developer-guide/templates/experiment_aws.tmpl b/contribute/developer-guide/templates/experiment_aws.tmpl index b1920a590..7104ac807 100644 --- a/contribute/developer-guide/templates/experiment_aws.tmpl +++ b/contribute/developer-guide/templates/experiment_aws.tmpl @@ -1,6 +1,7 @@ package experiment import ( + "context" "os" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/{{ .Name }}/lib" @@ -19,7 +20,7 @@ import ( ) // Experiment contains steps to inject chaos -func Experiment(clients clients.ClientSets){ +func Experiment(ctx context.Context, clients clients.ClientSets){ experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -90,7 +91,7 @@ func Experiment(clients clients.ClientSets){ // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails);err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails);err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -111,7 +112,7 @@ func Experiment(clients clients.ClientSets){ // @TODO: user INVOKE-CHAOSLIB chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -142,7 +143,7 @@ func Experiment(clients clients.ClientSets){ // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails);err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails);err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/contribute/developer-guide/templates/experiment_azure.tmpl b/contribute/developer-guide/templates/experiment_azure.tmpl index 7d8a76a18..9cf17e378 100644 --- a/contribute/developer-guide/templates/experiment_azure.tmpl +++ b/contribute/developer-guide/templates/experiment_azure.tmpl @@ -1,6 +1,7 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" @@ -20,7 +21,7 @@ import ( ) // Experiment contains steps to inject chaos -func Experiment(clients clients.ClientSets){ +func Experiment(ctx context.Context, clients clients.ClientSets){ experimentsDetails := 
experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -99,7 +100,7 @@ func Experiment(clients clients.ClientSets){ // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails);err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails);err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -120,7 +121,7 @@ func Experiment(clients clients.ClientSets){ // @TODO: user INVOKE-CHAOSLIB chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -148,7 +149,7 @@ func Experiment(clients clients.ClientSets){ // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails);err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails);err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/contribute/developer-guide/templates/experiment_gcp.tmpl b/contribute/developer-guide/templates/experiment_gcp.tmpl index c22151420..20bbe81b4 100644 --- a/contribute/developer-guide/templates/experiment_gcp.tmpl +++ b/contribute/developer-guide/templates/experiment_gcp.tmpl @@ -1,6 +1,7 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" @@ -98,7 +99,7 @@ func Experiment(clients clients.ClientSets){ // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails);err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails);err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -119,7 +120,7 @@ func Experiment(clients clients.ClientSets){ // @TODO: user INVOKE-CHAOSLIB chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) @@ -149,7 +150,7 @@ func Experiment(clients clients.ClientSets){ // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", 
&eventsDetails);err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails);err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/contribute/developer-guide/templates/experiment_k8s.tmpl b/contribute/developer-guide/templates/experiment_k8s.tmpl index bd33e4a78..85f29a6ef 100644 --- a/contribute/developer-guide/templates/experiment_k8s.tmpl +++ b/contribute/developer-guide/templates/experiment_k8s.tmpl @@ -1,6 +1,7 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" @@ -19,7 +20,7 @@ import ( ) // Experiment contains steps to inject chaos -func Experiment(clients clients.ClientSets){ +func Experiment(ctx context.Context, clients clients.ClientSets){ experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -104,7 +105,7 @@ func Experiment(clients clients.ClientSets){ // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails);err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails);err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -125,7 +126,7 @@ func Experiment(clients clients.ClientSets){ // @TODO: user INVOKE-CHAOSLIB chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -168,7 +169,7 @@ func Experiment(clients clients.ClientSets){ // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails);err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails);err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/contribute/developer-guide/templates/experiment_vmware.tmpl b/contribute/developer-guide/templates/experiment_vmware.tmpl index 38f64a162..aad8e769a 100644 --- a/contribute/developer-guide/templates/experiment_vmware.tmpl +++ b/contribute/developer-guide/templates/experiment_vmware.tmpl @@ -1,8 +1,10 @@ package experiment import ( + "context" "os" + "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/{{ .Name }}/lib" clients "github.com/litmuschaos/litmus-go/pkg/clients" @@ -19,7 +21,7 @@ import ( ) // Experiment contains steps to inject chaos -func Experiment(clients clients.ClientSets){ +func Experiment(ctx context.Context, clients clients.ClientSets){ experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -97,7 +99,7 @@ func 
Experiment(clients clients.ClientSets){ // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails);err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails);err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -118,7 +120,7 @@ func Experiment(clients clients.ClientSets){ // @TODO: user INVOKE-CHAOSLIB chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error() result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails) @@ -148,7 +150,7 @@ func Experiment(clients clients.ClientSets){ // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails);err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails);err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/aws-ssm/aws-ssm-chaos-by-id/experiment/aws-ssm-chaos-by-id.go b/experiments/aws-ssm/aws-ssm-chaos-by-id/experiment/aws-ssm-chaos-by-id.go index cf3445cf2..4edeaf6d3 100644 --- a/experiments/aws-ssm/aws-ssm-chaos-by-id/experiment/aws-ssm-chaos-by-id.go +++ b/experiments/aws-ssm/aws-ssm-chaos-by-id/experiment/aws-ssm-chaos-by-id.go @@ -1,13 +1,14 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/aws-ssm-chaos/lib/ssm" experimentEnv "github.com/litmuschaos/litmus-go/pkg/aws-ssm/aws-ssm-chaos/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/aws-ssm/aws-ssm-chaos/types" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" ec2 "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ec2" "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ssm" "github.com/litmuschaos/litmus-go/pkg/events" @@ -20,7 +21,7 @@ import ( ) // AWSSSMChaosByID inject the ssm chaos on ec2 instance -func AWSSSMChaosByID(clients clients.ClientSets) { +func AWSSSMChaosByID(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -80,7 +81,7 @@ func AWSSSMChaosByID(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", 
&chaosDetails) @@ -118,7 +119,7 @@ func AWSSSMChaosByID(clients clients.ClientSets) { chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareAWSSSMChaosByID(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareAWSSSMChaosByID(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) //Delete the ssm document on the given aws service monitoring docs @@ -152,7 +153,7 @@ func AWSSSMChaosByID(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/aws-ssm/aws-ssm-chaos-by-tag/experiment/aws-ssm-chaos-by-tag.go b/experiments/aws-ssm/aws-ssm-chaos-by-tag/experiment/aws-ssm-chaos-by-tag.go index d287e4be6..1799ac322 100644 --- a/experiments/aws-ssm/aws-ssm-chaos-by-tag/experiment/aws-ssm-chaos-by-tag.go +++ b/experiments/aws-ssm/aws-ssm-chaos-by-tag/experiment/aws-ssm-chaos-by-tag.go @@ -1,13 +1,14 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/aws-ssm-chaos/lib/ssm" experimentEnv "github.com/litmuschaos/litmus-go/pkg/aws-ssm/aws-ssm-chaos/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/aws-ssm/aws-ssm-chaos/types" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" ec2 "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ec2" "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ssm" "github.com/litmuschaos/litmus-go/pkg/events" @@ -20,7 +21,7 @@ import ( ) // AWSSSMChaosByTag inject the ssm chaos on ec2 instance -func AWSSSMChaosByTag(clients clients.ClientSets) { +func AWSSSMChaosByTag(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -88,7 +89,7 @@ func AWSSSMChaosByTag(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -109,7 +110,7 @@ func AWSSSMChaosByTag(clients clients.ClientSets) { chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareAWSSSMChaosByTag(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareAWSSSMChaosByTag(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, 
clients, &eventsDetails) //Delete the ssm document on the given aws service monitoring docs @@ -143,7 +144,7 @@ func AWSSSMChaosByTag(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/azure/azure-disk-loss/experiment/azure-disk-loss.go b/experiments/azure/azure-disk-loss/experiment/azure-disk-loss.go index 9887f81bd..85507b7ad 100644 --- a/experiments/azure/azure-disk-loss/experiment/azure-disk-loss.go +++ b/experiments/azure/azure-disk-loss/experiment/azure-disk-loss.go @@ -1,13 +1,14 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/azure-disk-loss/lib" experimentEnv "github.com/litmuschaos/litmus-go/pkg/azure/disk-loss/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/azure/disk-loss/types" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" azureCommon "github.com/litmuschaos/litmus-go/pkg/cloud/azure/common" azureStatus "github.com/litmuschaos/litmus-go/pkg/cloud/azure/disk" "github.com/litmuschaos/litmus-go/pkg/events" @@ -20,7 +21,7 @@ import ( ) // AzureDiskLoss contains steps to inject chaos -func AzureDiskLoss(clients clients.ClientSets) { +func AzureDiskLoss(ctx context.Context, clients clients.ClientSets) { var err error experimentsDetails := experimentTypes.ExperimentDetails{} @@ -99,7 +100,7 @@ func AzureDiskLoss(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err = probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -120,7 +121,7 @@ func AzureDiskLoss(clients clients.ClientSets) { chaosDetails.Phase = types.ChaosInjectPhase - if err = litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err = litmusLIB.PrepareChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) log.Errorf("Chaos injection failed: %v", err) return @@ -147,7 +148,7 @@ func AzureDiskLoss(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err = probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git 
a/experiments/azure/instance-stop/experiment/azure-instance-stop.go b/experiments/azure/instance-stop/experiment/azure-instance-stop.go index a934c5302..74b3424c8 100644 --- a/experiments/azure/instance-stop/experiment/azure-instance-stop.go +++ b/experiments/azure/instance-stop/experiment/azure-instance-stop.go @@ -1,13 +1,14 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/azure-instance-stop/lib" experimentEnv "github.com/litmuschaos/litmus-go/pkg/azure/instance-stop/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/azure/instance-stop/types" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" azureCommon "github.com/litmuschaos/litmus-go/pkg/cloud/azure/common" azureStatus "github.com/litmuschaos/litmus-go/pkg/cloud/azure/instance" @@ -21,7 +22,7 @@ import ( ) // AzureInstanceStop inject the azure instance stop chaos -func AzureInstanceStop(clients clients.ClientSets) { +func AzureInstanceStop(ctx context.Context, clients clients.ClientSets) { var err error experimentsDetails := experimentTypes.ExperimentDetails{} @@ -90,7 +91,7 @@ func AzureInstanceStop(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails) + err = probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails) if err != nil { log.Errorf("Probe Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" @@ -122,7 +123,7 @@ func AzureInstanceStop(clients clients.ClientSets) { chaosDetails.Phase = types.ChaosInjectPhase - if err = litmusLIB.PrepareAzureStop(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err = litmusLIB.PrepareAzureStop(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -149,7 +150,7 @@ func AzureInstanceStop(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails) + err = probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails) if err != nil { log.Errorf("Probes Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" diff --git a/experiments/baremetal/redfish-node-restart/experiment/redfish-node-restart.go b/experiments/baremetal/redfish-node-restart/experiment/redfish-node-restart.go index e0d9a1b45..7b9ae654d 100644 --- a/experiments/baremetal/redfish-node-restart/experiment/redfish-node-restart.go +++ b/experiments/baremetal/redfish-node-restart/experiment/redfish-node-restart.go @@ -1,6 +1,7 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" @@ -8,7 +9,7 @@ import ( redfishLib "github.com/litmuschaos/litmus-go/pkg/baremetal/redfish" experimentEnv "github.com/litmuschaos/litmus-go/pkg/baremetal/redfish-node-restart/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/baremetal/redfish-node-restart/types" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" 
"github.com/litmuschaos/litmus-go/pkg/events" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" @@ -20,7 +21,7 @@ import ( ) // NodeRestart contains steps to inject chaos -func NodeRestart(clients clients.ClientSets) { +func NodeRestart(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -112,7 +113,7 @@ func NodeRestart(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "NUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -129,7 +130,7 @@ func NodeRestart(clients clients.ClientSets) { chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) log.Errorf("Chaos injection failed, err: %v", err) return @@ -181,7 +182,7 @@ func NodeRestart(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "NUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/cassandra/pod-delete/experiment/pod-delete.go b/experiments/cassandra/pod-delete/experiment/pod-delete.go index c4af00d31..6e7417d48 100644 --- a/experiments/cassandra/pod-delete/experiment/pod-delete.go +++ b/experiments/cassandra/pod-delete/experiment/pod-delete.go @@ -1,6 +1,7 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" @@ -8,7 +9,7 @@ import ( "github.com/litmuschaos/litmus-go/pkg/cassandra" experimentEnv "github.com/litmuschaos/litmus-go/pkg/cassandra/pod-delete/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/cassandra/pod-delete/types" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" "github.com/litmuschaos/litmus-go/pkg/log" "github.com/litmuschaos/litmus-go/pkg/probe" @@ -20,7 +21,7 @@ import ( ) // CasssandraPodDelete inject the cassandra-pod-delete chaos -func CasssandraPodDelete(clients clients.ClientSets) { +func CasssandraPodDelete(ctx context.Context, clients clients.ClientSets) { var err error var ResourceVersionBefore string @@ -102,7 +103,7 @@ func CasssandraPodDelete(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err = probe.RunProbes(ctx, &chaosDetails, clients, 
&resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -132,7 +133,7 @@ func CasssandraPodDelete(clients clients.ClientSets) { chaosDetails.Phase = types.ChaosInjectPhase - if err = litmusLIB.PreparePodDelete(experimentsDetails.ChaoslibDetail, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err = litmusLIB.PreparePodDelete(ctx, experimentsDetails.ChaoslibDetail, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -169,7 +170,7 @@ func CasssandraPodDelete(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err = probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/gcp/gcp-vm-disk-loss-by-label/experiment/gcp-vm-disk-loss-by-label.go b/experiments/gcp/gcp-vm-disk-loss-by-label/experiment/gcp-vm-disk-loss-by-label.go index 446d6aa3f..644aea20f 100644 --- a/experiments/gcp/gcp-vm-disk-loss-by-label/experiment/gcp-vm-disk-loss-by-label.go +++ b/experiments/gcp/gcp-vm-disk-loss-by-label/experiment/gcp-vm-disk-loss-by-label.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/gcp-vm-disk-loss-by-label/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/cloud/gcp" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-disk-loss/environment" @@ -20,7 +21,7 @@ import ( ) // GCPVMDiskLossByLabel contains steps to inject chaos -func GCPVMDiskLossByLabel(clients clients.ClientSets) { +func GCPVMDiskLossByLabel(ctx context.Context, clients clients.ClientSets) { var ( computeService *compute.Service @@ -83,7 +84,7 @@ func GCPVMDiskLossByLabel(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -117,7 +118,7 @@ func GCPVMDiskLossByLabel(clients clients.ClientSets) { chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareDiskVolumeLossByLabel(computeService, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareDiskVolumeLossByLabel(ctx, computeService, 
&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -146,7 +147,7 @@ func GCPVMDiskLossByLabel(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/gcp/gcp-vm-disk-loss/experiment/gcp-vm-disk-loss.go b/experiments/gcp/gcp-vm-disk-loss/experiment/gcp-vm-disk-loss.go index abb182a9e..be1adbc8a 100644 --- a/experiments/gcp/gcp-vm-disk-loss/experiment/gcp-vm-disk-loss.go +++ b/experiments/gcp/gcp-vm-disk-loss/experiment/gcp-vm-disk-loss.go @@ -1,12 +1,13 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/gcp-vm-disk-loss/lib" "github.com/litmuschaos/litmus-go/pkg/clients" - gcp "github.com/litmuschaos/litmus-go/pkg/cloud/gcp" + "github.com/litmuschaos/litmus-go/pkg/cloud/gcp" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-disk-loss/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-disk-loss/types" @@ -20,7 +21,7 @@ import ( ) // VMDiskLoss injects the disk volume loss chaos -func VMDiskLoss(clients clients.ClientSets) { +func VMDiskLoss(ctx context.Context, clients clients.ClientSets) { var ( computeService *compute.Service @@ -83,7 +84,7 @@ func VMDiskLoss(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err = probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -125,7 +126,7 @@ func VMDiskLoss(clients clients.ClientSets) { chaosDetails.Phase = types.ChaosInjectPhase - if err = litmusLIB.PrepareDiskVolumeLoss(computeService, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err = litmusLIB.PrepareDiskVolumeLoss(ctx, computeService, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -152,7 +153,7 @@ func VMDiskLoss(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err = probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" 
types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/gcp/gcp-vm-instance-stop-by-label/experiment/gcp-vm-instance-stop-by-label.go b/experiments/gcp/gcp-vm-instance-stop-by-label/experiment/gcp-vm-instance-stop-by-label.go index a4075b0ac..30dc7d7c5 100644 --- a/experiments/gcp/gcp-vm-instance-stop-by-label/experiment/gcp-vm-instance-stop-by-label.go +++ b/experiments/gcp/gcp-vm-instance-stop-by-label/experiment/gcp-vm-instance-stop-by-label.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/gcp-vm-instance-stop-by-label/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/cloud/gcp" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/gcp/gcp-vm-instance-stop/environment" @@ -20,7 +21,7 @@ import ( ) // GCPVMInstanceStopByLabel contains steps to inject chaos -func GCPVMInstanceStopByLabel(clients clients.ClientSets) { +func GCPVMInstanceStopByLabel(ctx context.Context, clients clients.ClientSets) { var ( computeService *compute.Service @@ -84,7 +85,7 @@ func GCPVMInstanceStopByLabel(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -118,7 +119,7 @@ func GCPVMInstanceStopByLabel(clients clients.ClientSets) { chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareVMStopByLabel(computeService, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareVMStopByLabel(ctx, computeService, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -146,7 +147,7 @@ func GCPVMInstanceStopByLabel(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/gcp/gcp-vm-instance-stop/experiment/gcp-vm-instance-stop.go b/experiments/gcp/gcp-vm-instance-stop/experiment/gcp-vm-instance-stop.go index 7f3cbfb1b..8da11f7f7 100644 --- a/experiments/gcp/gcp-vm-instance-stop/experiment/gcp-vm-instance-stop.go +++ b/experiments/gcp/gcp-vm-instance-stop/experiment/gcp-vm-instance-stop.go @@ -1,6 +1,7 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" @@ -20,7 +21,7 @@ import ( ) // VMInstanceStop 
executes the experiment steps by injecting chaos into the specified vm instances -func VMInstanceStop(clients clients.ClientSets) { +func VMInstanceStop(ctx context.Context, clients clients.ClientSets) { var ( computeService *compute.Service @@ -83,7 +84,7 @@ func VMInstanceStop(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -119,7 +120,7 @@ func VMInstanceStop(clients clients.ClientSets) { chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareVMStop(computeService, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareVMStop(ctx, computeService, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -147,7 +148,7 @@ func VMInstanceStop(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/container-kill/experiment/container-kill.go b/experiments/generic/container-kill/experiment/container-kill.go index 7c06c37fc..05d3f8f80 100644 --- a/experiments/generic/container-kill/experiment/container-kill.go +++ b/experiments/generic/container-kill/experiment/container-kill.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/container-kill/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/container-kill/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/container-kill/types" @@ -19,7 +20,7 @@ import ( ) // ContainerKill inject the container-kill chaos -func ContainerKill(clients clients.ClientSets) { +func ContainerKill(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -90,7 +91,7 @@ func ContainerKill(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: 
Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -106,7 +107,7 @@ func ContainerKill(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareContainerKill(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareContainerKill(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -134,7 +135,7 @@ func ContainerKill(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/disk-fill/experiment/disk-fill.go b/experiments/generic/disk-fill/experiment/disk-fill.go index 4e8a73741..19546d0e6 100644 --- a/experiments/generic/disk-fill/experiment/disk-fill.go +++ b/experiments/generic/disk-fill/experiment/disk-fill.go @@ -1,9 +1,12 @@ package experiment import ( + "context" + "os" + "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/disk-fill/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/disk-fill/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/disk-fill/types" @@ -14,11 +17,10 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/sirupsen/logrus" - "os" ) // DiskFill inject the disk-fill chaos -func DiskFill(clients clients.ClientSets) { +func DiskFill(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -88,7 +90,7 @@ func DiskFill(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -104,7 +106,7 @@ func DiskFill(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareDiskFill(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareDiskFill(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, 
&resultDetails, err, clients, &eventsDetails) return @@ -132,7 +134,7 @@ func DiskFill(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/docker-service-kill/experiment/docker-service-kill.go b/experiments/generic/docker-service-kill/experiment/docker-service-kill.go index f93156e4f..06ed1d565 100644 --- a/experiments/generic/docker-service-kill/experiment/docker-service-kill.go +++ b/experiments/generic/docker-service-kill/experiment/docker-service-kill.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/docker-service-kill/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/docker-service-kill/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/docker-service-kill/types" @@ -19,7 +20,7 @@ import ( ) // DockerServiceKill inject the docker-service-kill chaos -func DockerServiceKill(clients clients.ClientSets) { +func DockerServiceKill(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -107,7 +108,7 @@ func DockerServiceKill(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -123,7 +124,7 @@ func DockerServiceKill(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareDockerServiceKill(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareDockerServiceKill(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) log.Errorf("Chaos injection failed, err: %v", err) return @@ -167,7 +168,7 @@ func DockerServiceKill(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", 
&chaosDetails) diff --git a/experiments/generic/kubelet-service-kill/experiment/kubelet-service-kill.go b/experiments/generic/kubelet-service-kill/experiment/kubelet-service-kill.go index 2aaebafd1..c78f065ed 100644 --- a/experiments/generic/kubelet-service-kill/experiment/kubelet-service-kill.go +++ b/experiments/generic/kubelet-service-kill/experiment/kubelet-service-kill.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/kubelet-service-kill/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/kubelet-service-kill/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/kubelet-service-kill/types" @@ -19,7 +20,7 @@ import ( ) // KubeletServiceKill inject the kubelet-service-kill chaos -func KubeletServiceKill(clients clients.ClientSets) { +func KubeletServiceKill(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -107,7 +108,7 @@ func KubeletServiceKill(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -123,7 +124,7 @@ func KubeletServiceKill(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareKubeletKill(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareKubeletKill(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -167,7 +168,7 @@ func KubeletServiceKill(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/node-cpu-hog/experiment/node-cpu-hog.go b/experiments/generic/node-cpu-hog/experiment/node-cpu-hog.go index a035acfdf..5fbd0248a 100644 --- a/experiments/generic/node-cpu-hog/experiment/node-cpu-hog.go +++ b/experiments/generic/node-cpu-hog/experiment/node-cpu-hog.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/node-cpu-hog/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" 
"github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/node-cpu-hog/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-cpu-hog/types" @@ -19,7 +20,7 @@ import ( ) // NodeCPUHog inject the node-cpu-hog chaos -func NodeCPUHog(clients clients.ClientSets) { +func NodeCPUHog(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -108,7 +109,7 @@ func NodeCPUHog(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -124,7 +125,7 @@ func NodeCPUHog(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareNodeCPUHog(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareNodeCPUHog(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("[Error]: CPU hog failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -168,7 +169,7 @@ func NodeCPUHog(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/node-drain/experiment/node-drain.go b/experiments/generic/node-drain/experiment/node-drain.go index 91876c45a..9b6533b17 100644 --- a/experiments/generic/node-drain/experiment/node-drain.go +++ b/experiments/generic/node-drain/experiment/node-drain.go @@ -1,6 +1,7 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" @@ -18,8 +19,8 @@ import ( "github.com/sirupsen/logrus" ) -//NodeDrain inject the node-drain chaos -func NodeDrain(clients clients.ClientSets) { +// NodeDrain inject the node-drain chaos +func NodeDrain(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -107,7 +108,7 @@ func NodeDrain(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -123,7 +124,7 @@ func NodeDrain(clients clients.ClientSets) { } chaosDetails.Phase = 
types.ChaosInjectPhase - if err := litmusLIB.PrepareNodeDrain(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareNodeDrain(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -167,7 +168,7 @@ func NodeDrain(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/node-io-stress/experiment/node-io-stress.go b/experiments/generic/node-io-stress/experiment/node-io-stress.go index 1b76fa841..a58e0f2e5 100644 --- a/experiments/generic/node-io-stress/experiment/node-io-stress.go +++ b/experiments/generic/node-io-stress/experiment/node-io-stress.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/node-io-stress/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/node-io-stress/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-io-stress/types" @@ -19,7 +20,7 @@ import ( ) // NodeIOStress inject the node-io-stress chaos -func NodeIOStress(clients clients.ClientSets) { +func NodeIOStress(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -110,7 +111,7 @@ func NodeIOStress(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -126,7 +127,7 @@ func NodeIOStress(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareNodeIOStress(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareNodeIOStress(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("[Error]: node io stress failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -170,7 +171,7 @@ func NodeIOStress(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, 
&resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/node-memory-hog/experiment/node-memory-hog.go b/experiments/generic/node-memory-hog/experiment/node-memory-hog.go index 3c5dea5b8..cd040c5a0 100644 --- a/experiments/generic/node-memory-hog/experiment/node-memory-hog.go +++ b/experiments/generic/node-memory-hog/experiment/node-memory-hog.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/node-memory-hog/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/node-memory-hog/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-memory-hog/types" @@ -19,7 +20,7 @@ import ( ) // NodeMemoryHog inject the node-memory-hog chaos -func NodeMemoryHog(clients clients.ClientSets) { +func NodeMemoryHog(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -109,7 +110,7 @@ func NodeMemoryHog(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -125,7 +126,7 @@ func NodeMemoryHog(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareNodeMemoryHog(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareNodeMemoryHog(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("[Error]: node memory hog failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -169,7 +170,7 @@ func NodeMemoryHog(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/node-restart/experiment/node-restart.go b/experiments/generic/node-restart/experiment/node-restart.go index cf6084a07..788836f76 100644 --- a/experiments/generic/node-restart/experiment/node-restart.go +++ b/experiments/generic/node-restart/experiment/node-restart.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB 
"github.com/litmuschaos/litmus-go/chaoslib/litmus/node-restart/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/node-restart/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-restart/types" @@ -19,7 +20,7 @@ import ( ) // NodeRestart inject the node-restart chaos -func NodeRestart(clients clients.ClientSets) { +func NodeRestart(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -107,7 +108,7 @@ func NodeRestart(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -123,7 +124,7 @@ func NodeRestart(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareNodeRestart(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareNodeRestart(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("[Error]: Node restart failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -167,7 +168,7 @@ func NodeRestart(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/node-taint/experiment/node-taint.go b/experiments/generic/node-taint/experiment/node-taint.go index 834c36119..9468e1c9d 100644 --- a/experiments/generic/node-taint/experiment/node-taint.go +++ b/experiments/generic/node-taint/experiment/node-taint.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/node-taint/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/node-taint/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/node-taint/types" @@ -19,7 +20,7 @@ import ( ) // NodeTaint inject the node-taint chaos -func NodeTaint(clients clients.ClientSets) { +func NodeTaint(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -108,7 +109,7 @@ func NodeTaint(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) 
!= 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -124,7 +125,7 @@ func NodeTaint(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareNodeTaint(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareNodeTaint(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -168,7 +169,7 @@ func NodeTaint(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "NUT: Ready, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/pod-autoscaler/experiment/pod-autoscaler.go b/experiments/generic/pod-autoscaler/experiment/pod-autoscaler.go index 5930959db..b7b78b8c5 100644 --- a/experiments/generic/pod-autoscaler/experiment/pod-autoscaler.go +++ b/experiments/generic/pod-autoscaler/experiment/pod-autoscaler.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/pod-autoscaler/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/pod-autoscaler/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-autoscaler/types" @@ -19,7 +20,7 @@ import ( ) // PodAutoscaler inject the pod-autoscaler chaos -func PodAutoscaler(clients clients.ClientSets) { +func PodAutoscaler(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -91,7 +92,7 @@ func PodAutoscaler(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -107,7 +108,7 @@ func PodAutoscaler(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PreparePodAutoscaler(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PreparePodAutoscaler(ctx, 
&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -136,7 +137,7 @@ func PodAutoscaler(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/pod-cpu-hog-exec/experiment/pod-cpu-hog-exec.go b/experiments/generic/pod-cpu-hog-exec/experiment/pod-cpu-hog-exec.go index 49b7c2b75..396d74c04 100644 --- a/experiments/generic/pod-cpu-hog-exec/experiment/pod-cpu-hog-exec.go +++ b/experiments/generic/pod-cpu-hog-exec/experiment/pod-cpu-hog-exec.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/pod-cpu-hog-exec/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/pod-cpu-hog-exec/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-cpu-hog-exec/types" @@ -19,7 +20,7 @@ import ( ) // PodCPUHogExec inject the pod-cpu-hog-exec chaos -func PodCPUHogExec(clients clients.ClientSets) { +func PodCPUHogExec(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -90,7 +91,7 @@ func PodCPUHogExec(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -106,7 +107,7 @@ func PodCPUHogExec(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareCPUExecStress(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareCPUExecStress(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("[Error]: CPU hog failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -134,7 +135,7 @@ func PodCPUHogExec(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { 
log.Errorf("Probes Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/pod-cpu-hog/experiment/pod-cpu-hog.go b/experiments/generic/pod-cpu-hog/experiment/pod-cpu-hog.go index ded331c6c..6ad9c212a 100644 --- a/experiments/generic/pod-cpu-hog/experiment/pod-cpu-hog.go +++ b/experiments/generic/pod-cpu-hog/experiment/pod-cpu-hog.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/stress-chaos/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/stress-chaos/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/stress-chaos/types" @@ -19,7 +20,7 @@ import ( ) // PodCPUHog inject the pod-cpu-hog chaos -func PodCPUHog(clients clients.ClientSets) { +func PodCPUHog(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -90,7 +91,7 @@ func PodCPUHog(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -106,7 +107,7 @@ func PodCPUHog(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareAndInjectStressChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareAndInjectStressChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("[Error]: CPU hog failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -134,7 +135,7 @@ func PodCPUHog(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/pod-delete/experiment/pod-delete.go b/experiments/generic/pod-delete/experiment/pod-delete.go index a5801ffef..0fb445f15 100644 --- a/experiments/generic/pod-delete/experiment/pod-delete.go +++ b/experiments/generic/pod-delete/experiment/pod-delete.go @@ -1,6 +1,9 @@ package experiment import ( + "context" + "os" + "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB 
"github.com/litmuschaos/litmus-go/chaoslib/litmus/pod-delete/lib" "github.com/litmuschaos/litmus-go/pkg/clients" @@ -14,12 +17,10 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" "github.com/litmuschaos/litmus-go/pkg/utils/common" "github.com/sirupsen/logrus" - "os" ) // PodDelete inject the pod-delete chaos -func PodDelete(clients clients.ClientSets) { - +func PodDelete(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} eventsDetails := types.EventDetails{} @@ -58,7 +59,7 @@ func PodDelete(clients clients.ClientSets) { return } - // generating the event in chaosresult to marked the verdict as awaited + // generating the event in chaosresult to mark the verdict as awaited msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) if err := events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult"); err != nil { @@ -95,7 +96,7 @@ func PodDelete(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -113,7 +114,7 @@ func PodDelete(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PreparePodDelete(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PreparePodDelete(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -141,7 +142,7 @@ func PodDelete(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg = common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/pod-dns-error/experiment/pod-dns-error.go b/experiments/generic/pod-dns-error/experiment/pod-dns-error.go index 4442365c9..a7c739f65 100644 --- a/experiments/generic/pod-dns-error/experiment/pod-dns-error.go +++ b/experiments/generic/pod-dns-error/experiment/pod-dns-error.go @@ -1,6 +1,7 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" @@ -19,7 +20,7 @@ import ( ) // PodDNSError contains steps to inject chaos -func PodDNSError(clients clients.ClientSets) { +func PodDNSError(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -91,7 +92,7 @@ func 
PodDNSError(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -107,7 +108,7 @@ func PodDNSError(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareAndInjectChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareAndInjectChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -135,7 +136,7 @@ func PodDNSError(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/pod-dns-spoof/experiment/pod-dns-spoof.go b/experiments/generic/pod-dns-spoof/experiment/pod-dns-spoof.go index 277680572..145376a64 100644 --- a/experiments/generic/pod-dns-spoof/experiment/pod-dns-spoof.go +++ b/experiments/generic/pod-dns-spoof/experiment/pod-dns-spoof.go @@ -1,6 +1,7 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" @@ -19,7 +20,7 @@ import ( ) // PodDNSSpoof contains steps to inject chaos -func PodDNSSpoof(clients clients.ClientSets) { +func PodDNSSpoof(ctx context.Context, clients clients.ClientSets) { var err error experimentsDetails := experimentTypes.ExperimentDetails{} @@ -92,7 +93,7 @@ func PodDNSSpoof(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails) + err = probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails) if err != nil { log.Errorf("Probe Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") @@ -109,7 +110,7 @@ func PodDNSSpoof(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err = litmusLIB.PrepareAndInjectChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err = litmusLIB.PrepareAndInjectChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -137,7 +138,7 @@ func PodDNSSpoof(clients clients.ClientSets) { // run the probes in the 
post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err = probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/pod-fio-stress/experiment/pod-fio-stress.go b/experiments/generic/pod-fio-stress/experiment/pod-fio-stress.go index 2f7348a66..e23a4df08 100644 --- a/experiments/generic/pod-fio-stress/experiment/pod-fio-stress.go +++ b/experiments/generic/pod-fio-stress/experiment/pod-fio-stress.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/pod-fio-stress/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/pod-fio-stress/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-fio-stress/types" @@ -19,7 +20,7 @@ import ( ) // Experiment contains steps to inject chaos -func PodFioStress(clients clients.ClientSets) { +func PodFioStress(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -89,7 +90,7 @@ func PodFioStress(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -105,7 +106,7 @@ func PodFioStress(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) log.Errorf("Chaos injection failed, err: %v", err) return @@ -133,7 +134,7 @@ func PodFioStress(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/pod-http-latency/experiment/pod-http-latency.go b/experiments/generic/pod-http-latency/experiment/pod-http-latency.go 
index 03bee5023..7f8a9f1cb 100644 --- a/experiments/generic/pod-http-latency/experiment/pod-http-latency.go +++ b/experiments/generic/pod-http-latency/experiment/pod-http-latency.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib/latency" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types" @@ -19,7 +20,7 @@ import ( ) // PodHttpLatency inject the pod-http-latency chaos -func PodHttpLatency(clients clients.ClientSets) { +func PodHttpLatency(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -90,7 +91,7 @@ func PodHttpLatency(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -106,7 +107,7 @@ func PodHttpLatency(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PodHttpLatencyChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PodHttpLatencyChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -134,7 +135,7 @@ func PodHttpLatency(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/pod-http-modify-body/experiment/pod-http-modify-body.go b/experiments/generic/pod-http-modify-body/experiment/pod-http-modify-body.go index c68b40595..782d539f4 100644 --- a/experiments/generic/pod-http-modify-body/experiment/pod-http-modify-body.go +++ b/experiments/generic/pod-http-modify-body/experiment/pod-http-modify-body.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib/modify-body" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv 
"github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types" @@ -19,7 +20,7 @@ import ( ) // PodHttpModifyBody contains steps to inject chaos -func PodHttpModifyBody(clients clients.ClientSets) { +func PodHttpModifyBody(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -90,7 +91,7 @@ func PodHttpModifyBody(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -106,7 +107,7 @@ func PodHttpModifyBody(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PodHttpModifyBodyChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PodHttpModifyBodyChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -134,7 +135,7 @@ func PodHttpModifyBody(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/pod-http-modify-header/experiment/pod-http-modify-header.go b/experiments/generic/pod-http-modify-header/experiment/pod-http-modify-header.go index cfb634457..f05053481 100644 --- a/experiments/generic/pod-http-modify-header/experiment/pod-http-modify-header.go +++ b/experiments/generic/pod-http-modify-header/experiment/pod-http-modify-header.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib/header" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types" @@ -19,7 +20,7 @@ import ( ) // PodHttpModifyHeader inject the pod-http-modify-header chaos -func PodHttpModifyHeader(clients clients.ClientSets) { +func PodHttpModifyHeader(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -90,7 +91,7 @@ func PodHttpModifyHeader(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := 
probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -106,7 +107,7 @@ func PodHttpModifyHeader(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PodHttpModifyHeaderChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PodHttpModifyHeaderChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -134,7 +135,7 @@ func PodHttpModifyHeader(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/pod-http-reset-peer/experiment/pod-http-reset-peer.go b/experiments/generic/pod-http-reset-peer/experiment/pod-http-reset-peer.go index 76188c3a1..d5df84fd4 100644 --- a/experiments/generic/pod-http-reset-peer/experiment/pod-http-reset-peer.go +++ b/experiments/generic/pod-http-reset-peer/experiment/pod-http-reset-peer.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib/reset" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types" @@ -19,7 +20,7 @@ import ( ) // PodHttpResetPeer contains steps to inject chaos -func PodHttpResetPeer(clients clients.ClientSets) { +func PodHttpResetPeer(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -90,7 +91,7 @@ func PodHttpResetPeer(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -106,7 +107,7 @@ func PodHttpResetPeer(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PodHttpResetPeerChaos(&experimentsDetails, 
clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PodHttpResetPeerChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -134,7 +135,7 @@ func PodHttpResetPeer(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/pod-http-status-code/experiment/pod-http-status-code.go b/experiments/generic/pod-http-status-code/experiment/pod-http-status-code.go index b37de87df..447b3ef13 100644 --- a/experiments/generic/pod-http-status-code/experiment/pod-http-status-code.go +++ b/experiments/generic/pod-http-status-code/experiment/pod-http-status-code.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/http-chaos/lib/statuscode" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/http-chaos/types" @@ -19,7 +20,7 @@ import ( ) // PodHttpStatusCode contains steps to inject chaos -func PodHttpStatusCode(clients clients.ClientSets) { +func PodHttpStatusCode(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -98,7 +99,7 @@ func PodHttpStatusCode(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -114,7 +115,7 @@ func PodHttpStatusCode(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PodHttpStatusCodeChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PodHttpStatusCodeChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -142,7 +143,7 @@ func PodHttpStatusCode(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, 
clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/pod-io-stress/experiment/pod-io-stress.go b/experiments/generic/pod-io-stress/experiment/pod-io-stress.go index eb6dcdb3f..95b964754 100644 --- a/experiments/generic/pod-io-stress/experiment/pod-io-stress.go +++ b/experiments/generic/pod-io-stress/experiment/pod-io-stress.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/stress-chaos/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/stress-chaos/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/stress-chaos/types" @@ -19,7 +20,7 @@ import ( ) // PodIOStress inject the pod-io-stress chaos -func PodIOStress(clients clients.ClientSets) { +func PodIOStress(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -90,7 +91,7 @@ func PodIOStress(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -106,7 +107,7 @@ func PodIOStress(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareAndInjectStressChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareAndInjectStressChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("[Error]: Pod IO Stress failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -134,7 +135,7 @@ func PodIOStress(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/pod-memory-hog-exec/experiment/pod-memory-hog-exec.go b/experiments/generic/pod-memory-hog-exec/experiment/pod-memory-hog-exec.go index f3a1d4a4f..7cad3f0e3 100644 --- a/experiments/generic/pod-memory-hog-exec/experiment/pod-memory-hog-exec.go +++ b/experiments/generic/pod-memory-hog-exec/experiment/pod-memory-hog-exec.go @@ -1,11 +1,12 @@ package experiment 
import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/pod-memory-hog-exec/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/pod-memory-hog-exec/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-memory-hog-exec/types" @@ -19,7 +20,7 @@ import ( ) // PodMemoryHogExec inject the pod-memory-hog-exec chaos -func PodMemoryHogExec(clients clients.ClientSets) { +func PodMemoryHogExec(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -90,7 +91,7 @@ func PodMemoryHogExec(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -106,7 +107,7 @@ func PodMemoryHogExec(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareMemoryExecStress(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareMemoryExecStress(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("[Error]: pod memory hog failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -134,7 +135,7 @@ func PodMemoryHogExec(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/pod-memory-hog/experiment/pod-memory-hog.go b/experiments/generic/pod-memory-hog/experiment/pod-memory-hog.go index a4bf3cc57..128e177f4 100644 --- a/experiments/generic/pod-memory-hog/experiment/pod-memory-hog.go +++ b/experiments/generic/pod-memory-hog/experiment/pod-memory-hog.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/stress-chaos/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/stress-chaos/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/stress-chaos/types" @@ -19,7 +20,7 @@ import ( ) // PodMemoryHog inject the pod-memory-hog chaos -func PodMemoryHog(clients 
clients.ClientSets) { +func PodMemoryHog(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -90,7 +91,7 @@ func PodMemoryHog(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -106,7 +107,7 @@ func PodMemoryHog(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareAndInjectStressChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareAndInjectStressChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("[Error]: pod memory hog failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -134,7 +135,7 @@ func PodMemoryHog(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/pod-network-corruption/experiment/pod-network-corruption.go b/experiments/generic/pod-network-corruption/experiment/pod-network-corruption.go index d3276dbfd..183b2021b 100644 --- a/experiments/generic/pod-network-corruption/experiment/pod-network-corruption.go +++ b/experiments/generic/pod-network-corruption/experiment/pod-network-corruption.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib/corruption" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types" @@ -19,7 +20,7 @@ import ( ) // PodNetworkCorruption inject the pod-network-corruption chaos -func PodNetworkCorruption(clients clients.ClientSets) { +func PodNetworkCorruption(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -91,7 +92,7 @@ func PodNetworkCorruption(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, 
&resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -107,7 +108,7 @@ func PodNetworkCorruption(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PodNetworkCorruptionChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PodNetworkCorruptionChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -135,7 +136,7 @@ func PodNetworkCorruption(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/pod-network-duplication/experiment/pod-network-duplication.go b/experiments/generic/pod-network-duplication/experiment/pod-network-duplication.go index f37c46369..f1e42c839 100644 --- a/experiments/generic/pod-network-duplication/experiment/pod-network-duplication.go +++ b/experiments/generic/pod-network-duplication/experiment/pod-network-duplication.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib/duplication" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types" @@ -19,7 +20,7 @@ import ( ) // PodNetworkDuplication inject the pod-network-duplication chaos -func PodNetworkDuplication(clients clients.ClientSets) { +func PodNetworkDuplication(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -91,7 +92,7 @@ func PodNetworkDuplication(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -107,7 +108,7 @@ func PodNetworkDuplication(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PodNetworkDuplicationChaos(&experimentsDetails, clients, 
&resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PodNetworkDuplicationChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -135,7 +136,7 @@ func PodNetworkDuplication(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/pod-network-latency/experiment/pod-network-latency.go b/experiments/generic/pod-network-latency/experiment/pod-network-latency.go index a4ca95682..efa7699a0 100644 --- a/experiments/generic/pod-network-latency/experiment/pod-network-latency.go +++ b/experiments/generic/pod-network-latency/experiment/pod-network-latency.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib/latency" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types" @@ -19,7 +20,7 @@ import ( ) // PodNetworkLatency inject the pod-network-latency chaos -func PodNetworkLatency(clients clients.ClientSets) { +func PodNetworkLatency(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -90,7 +91,7 @@ func PodNetworkLatency(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -107,7 +108,7 @@ func PodNetworkLatency(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PodNetworkLatencyChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PodNetworkLatencyChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -135,7 +136,7 @@ func PodNetworkLatency(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, 
&resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/pod-network-loss/experiment/pod-network-loss.go b/experiments/generic/pod-network-loss/experiment/pod-network-loss.go index 926459318..cfb538156 100644 --- a/experiments/generic/pod-network-loss/experiment/pod-network-loss.go +++ b/experiments/generic/pod-network-loss/experiment/pod-network-loss.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/network-chaos/lib/loss" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/network-chaos/types" @@ -19,7 +20,7 @@ import ( ) // PodNetworkLoss inject the pod-network-loss chaos -func PodNetworkLoss(clients clients.ClientSets) { +func PodNetworkLoss(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} chaosDetails := types.ChaosDetails{} @@ -90,7 +91,7 @@ func PodNetworkLoss(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -106,7 +107,7 @@ func PodNetworkLoss(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PodNetworkLossChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PodNetworkLossChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -134,7 +135,7 @@ func PodNetworkLoss(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/generic/pod-network-partition/experiment/pod-network-partition.go b/experiments/generic/pod-network-partition/experiment/pod-network-partition.go 
index c0c300b86..44e73cd67 100644 --- a/experiments/generic/pod-network-partition/experiment/pod-network-partition.go +++ b/experiments/generic/pod-network-partition/experiment/pod-network-partition.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/pod-network-partition/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/generic/pod-network-partition/environment" experimentTypes "github.com/litmuschaos/litmus-go/pkg/generic/pod-network-partition/types" @@ -19,7 +20,7 @@ import ( ) // PodNetworkPartition inject the pod-network-partition chaos -func PodNetworkPartition(clients clients.ClientSets) { +func PodNetworkPartition(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -89,7 +90,7 @@ func PodNetworkPartition(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -105,7 +106,7 @@ func PodNetworkPartition(clients clients.ClientSets) { } chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareAndInjectChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareAndInjectChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) log.Errorf("Chaos injection failed, err: %v", err) return @@ -133,7 +134,7 @@ func PodNetworkPartition(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/kafka/kafka-broker-pod-failure/experiment/kafka-broker-pod-failure.go b/experiments/kafka/kafka-broker-pod-failure/experiment/kafka-broker-pod-failure.go index 7bf9204c2..ef33545cf 100644 --- a/experiments/kafka/kafka-broker-pod-failure/experiment/kafka-broker-pod-failure.go +++ b/experiments/kafka/kafka-broker-pod-failure/experiment/kafka-broker-pod-failure.go @@ -1,12 +1,13 @@ package experiment import ( + "context" "os" "strings" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" kafkaPodDelete "github.com/litmuschaos/litmus-go/chaoslib/litmus/kafka-broker-pod-failure/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + 
"github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/events" "github.com/litmuschaos/litmus-go/pkg/kafka" experimentEnv "github.com/litmuschaos/litmus-go/pkg/kafka/environment" @@ -21,7 +22,7 @@ import ( ) // KafkaBrokerPodFailure derive and kill the kafka broker leader -func KafkaBrokerPodFailure(clients clients.ClientSets) { +func KafkaBrokerPodFailure(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -89,7 +90,7 @@ func KafkaBrokerPodFailure(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -125,7 +126,7 @@ func KafkaBrokerPodFailure(clients clients.ClientSets) { chaosDetails.Phase = types.ChaosInjectPhase - if err := kafkaPodDelete.PreparePodDelete(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := kafkaPodDelete.PreparePodDelete(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -154,7 +155,7 @@ func KafkaBrokerPodFailure(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := common.GetStatusMessage(chaosDetails.DefaultHealthCheck, "AUT: Running", "Unsuccessful") types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/kube-aws/ebs-loss-by-id/experiment/ebs-loss-by-id.go b/experiments/kube-aws/ebs-loss-by-id/experiment/ebs-loss-by-id.go index 5f64c7e4f..20d3cc248 100644 --- a/experiments/kube-aws/ebs-loss-by-id/experiment/ebs-loss-by-id.go +++ b/experiments/kube-aws/ebs-loss-by-id/experiment/ebs-loss-by-id.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-id/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" aws "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ebs" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/kube-aws/ebs-loss/environment" @@ -19,7 +20,7 @@ import ( ) // EBSLossByID inject the ebs volume loss chaos -func EBSLossByID(clients clients.ClientSets) { +func EBSLossByID(ctx context.Context, clients clients.ClientSets) { var err error experimentsDetails := experimentTypes.ExperimentDetails{} @@ -90,7 +91,7 @@ func EBSLossByID(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err = 
probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err = probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -111,7 +112,7 @@ func EBSLossByID(clients clients.ClientSets) { chaosDetails.Phase = types.ChaosInjectPhase - if err = litmusLIB.PrepareEBSLossByID(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err = litmusLIB.PrepareEBSLossByID(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -137,7 +138,7 @@ func EBSLossByID(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err = probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/kube-aws/ebs-loss-by-tag/experiment/ebs-loss-by-tag.go b/experiments/kube-aws/ebs-loss-by-tag/experiment/ebs-loss-by-tag.go index 96a7323e6..44f201efa 100644 --- a/experiments/kube-aws/ebs-loss-by-tag/experiment/ebs-loss-by-tag.go +++ b/experiments/kube-aws/ebs-loss-by-tag/experiment/ebs-loss-by-tag.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/ebs-loss/lib/ebs-loss-by-tag/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" aws "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ebs" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/kube-aws/ebs-loss/environment" @@ -19,7 +20,7 @@ import ( ) // EBSLossByTag inject the ebs volume loss chaos -func EBSLossByTag(clients clients.ClientSets) { +func EBSLossByTag(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -88,7 +89,7 @@ func EBSLossByTag(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -109,7 +110,7 @@ func EBSLossByTag(clients clients.ClientSets) { chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareEBSLossByTag(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareEBSLossByTag(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { 
log.Errorf("Chaos injection failed: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -135,7 +136,7 @@ func EBSLossByTag(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/kube-aws/ec2-terminate-by-id/experiment/ec2-terminate-by-id.go b/experiments/kube-aws/ec2-terminate-by-id/experiment/ec2-terminate-by-id.go index 5db97c1e4..95010a803 100644 --- a/experiments/kube-aws/ec2-terminate-by-id/experiment/ec2-terminate-by-id.go +++ b/experiments/kube-aws/ec2-terminate-by-id/experiment/ec2-terminate-by-id.go @@ -1,12 +1,13 @@ package experiment import ( + "context" "os" "strings" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/ec2-terminate-by-id/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" aws "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ec2" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/kube-aws/ec2-terminate-by-id/environment" @@ -20,7 +21,7 @@ import ( ) // EC2TerminateByID inject the ebs volume loss chaos -func EC2TerminateByID(clients clients.ClientSets) { +func EC2TerminateByID(ctx context.Context, clients clients.ClientSets) { var ( err error @@ -86,7 +87,7 @@ func EC2TerminateByID(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err = probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -129,7 +130,7 @@ func EC2TerminateByID(clients clients.ClientSets) { chaosDetails.Phase = types.ChaosInjectPhase - if err = litmusLIB.PrepareEC2TerminateByID(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err = litmusLIB.PrepareEC2TerminateByID(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -167,7 +168,7 @@ func EC2TerminateByID(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err = probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/kube-aws/ec2-terminate-by-tag/experiment/ec2-terminate-tag.go 
b/experiments/kube-aws/ec2-terminate-by-tag/experiment/ec2-terminate-tag.go index 378bb1573..5d4a0ee8e 100644 --- a/experiments/kube-aws/ec2-terminate-by-tag/experiment/ec2-terminate-tag.go +++ b/experiments/kube-aws/ec2-terminate-by-tag/experiment/ec2-terminate-tag.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/ec2-terminate-by-tag/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" aws "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ec2" "github.com/litmuschaos/litmus-go/pkg/events" experimentEnv "github.com/litmuschaos/litmus-go/pkg/kube-aws/ec2-terminate-by-tag/environment" @@ -19,7 +20,7 @@ import ( ) // EC2TerminateByTag inject the ebs volume loss chaos -func EC2TerminateByTag(clients clients.ClientSets) { +func EC2TerminateByTag(ctx context.Context, clients clients.ClientSets) { var ( err error @@ -71,7 +72,7 @@ func EC2TerminateByTag(clients clients.ClientSets) { log.InfoWithValues("The instance information is as follows", logrus.Fields{ "Chaos Duration": experimentsDetails.ChaosDuration, "Chaos Namespace": experimentsDetails.ChaosNamespace, - "Instance Tag": experimentsDetails.InstanceTag, + "Instance Tag": experimentsDetails.Ec2InstanceTag, "Instance Affected Percentage": experimentsDetails.InstanceAffectedPerc, "Sequence": experimentsDetails.Sequence, }) @@ -86,7 +87,7 @@ func EC2TerminateByTag(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err = probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -125,7 +126,7 @@ func EC2TerminateByTag(clients clients.ClientSets) { chaosDetails.Phase = types.ChaosInjectPhase - if err = litmusLIB.PrepareEC2TerminateByTag(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err = litmusLIB.PrepareEC2TerminateByTag(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -162,7 +163,7 @@ func EC2TerminateByTag(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err = probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/kube-aws/ec2-terminate-by-tag/test/test.yml b/experiments/kube-aws/ec2-terminate-by-tag/test/test.yml index a646b993a..6e91191c3 100644 --- a/experiments/kube-aws/ec2-terminate-by-tag/test/test.yml +++ b/experiments/kube-aws/ec2-terminate-by-tag/test/test.yml @@ -22,7 +22,7 @@ spec: - "3600" env: # value: key:value ex: team:devops - - name: INSTANCE_TAG + - name: EC2_INSTANCE_TAG 
value: '' - name: CHAOS_NAMESPACE diff --git a/experiments/load/k6-loadgen/README.md b/experiments/load/k6-loadgen/README.md new file mode 100644 index 000000000..c4323e45f --- /dev/null +++ b/experiments/load/k6-loadgen/README.md @@ -0,0 +1,14 @@ +## Experiment Metadata
+
+| Name | Description | Documentation Link |
+| ---- | ----------- | ------------------ |
+| k6 Load Generator | k6 is an open-source load testing tool that makes performance testing easy and productive for engineering teams. You can easily run load testing through a single JS script. Learn how to use k6 here. | Here |
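For reference, the ctx parameter threaded through every experiment entrypoint in this patch (together with the OpenTelemetry dependencies added to go.mod further down) is expected to originate in the experiment runner. The sketch below shows one way a caller could wire it up; the tracer name, the import alias, and the use of the standard library logger are illustrative assumptions and are not part of this diff.

// Illustrative only: a minimal runner-side sketch (not part of this diff) showing how
// the context now required by the experiment entrypoints could be created and passed.
package main

import (
	"context"
	"log"

	k6loadgen "github.com/litmuschaos/litmus-go/experiments/load/k6-loadgen/experiment"
	"github.com/litmuschaos/litmus-go/pkg/clients"
	"go.opentelemetry.io/otel"
)

func main() {
	// Start a root span so probe and chaos-injection calls can attach child spans via ctx.
	ctx, span := otel.Tracer("litmuschaos").Start(context.Background(), "k6-loadgen")
	defer span.End()

	// Build the Kubernetes client sets shared by all experiments.
	clientSet := clients.ClientSets{}
	if err := clientSet.GenerateClientSetFromKubeConfig(); err != nil {
		log.Fatalf("unable to create client sets: %v", err)
	}

	// After this patch, the context is the first argument of every experiment entrypoint.
	k6loadgen.Experiment(ctx, clientSet)
}

Passing the context as the first argument mirrors the convention followed by probe.RunProbes and the chaoslib Prepare*/Inject* helpers updated throughout this patch.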
diff --git a/experiments/load/k6-loadgen/experiment/k6-loadgen.go b/experiments/load/k6-loadgen/experiment/k6-loadgen.go new file mode 100644 index 000000000..0a62d3949 --- /dev/null +++ b/experiments/load/k6-loadgen/experiment/k6-loadgen.go @@ -0,0 +1,170 @@ +package experiment + +import ( + "context" + "os" + + "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" + litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/k6-loadgen/lib" + "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/events" + experimentEnv "github.com/litmuschaos/litmus-go/pkg/load/k6-loadgen/environment" + experimentTypes "github.com/litmuschaos/litmus-go/pkg/load/k6-loadgen/types" + "github.com/litmuschaos/litmus-go/pkg/log" + "github.com/litmuschaos/litmus-go/pkg/probe" + "github.com/litmuschaos/litmus-go/pkg/result" + "github.com/litmuschaos/litmus-go/pkg/status" + "github.com/litmuschaos/litmus-go/pkg/types" + "github.com/litmuschaos/litmus-go/pkg/utils/common" + "github.com/sirupsen/logrus" +) + +// Experiment contains steps to inject chaos +func Experiment(ctx context.Context, clients clients.ClientSets) { + + experimentsDetails := experimentTypes.ExperimentDetails{} + resultDetails := types.ResultDetails{} + eventsDetails := types.EventDetails{} + chaosDetails := types.ChaosDetails{} + + //Fetching all the ENV passed from the runner pod + log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME")) + experimentEnv.GetENV(&experimentsDetails) + + // Initialize the chaos attributes + types.InitialiseChaosVariables(&chaosDetails) + + // Initialize Chaos Result Parameters + types.SetResultAttributes(&resultDetails, chaosDetails) + + if experimentsDetails.EngineName != "" { + // Get values from chaosengine. 
Bail out upon error, as we haven't entered exp business logic yet + if err := types.GetValuesFromChaosEngine(&chaosDetails, clients, &resultDetails); err != nil { + log.Errorf("Unable to initialize the probes, err: %v", err) + return + } + } + + //Updating the chaos result in the beginning of experiment + log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName) + if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil { + log.Errorf("Unable to Create the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) + return + } + + // Set the chaos result uid + result.SetResultUID(&resultDetails, clients, &chaosDetails) + + // generating the event in chaosresult to marked the verdict as awaited + msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited" + types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails) + events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") + + //DISPLAY THE APP INFORMATION + log.InfoWithValues("[Info]: The application information is as follows", logrus.Fields{ + "Namespace": experimentsDetails.AppNS, + "Label": experimentsDetails.AppLabel, + "Chaos Duration": experimentsDetails.ChaosDuration, + }) + + // Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result + go common.AbortWatcher(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails) + + //PRE-CHAOS APPLICATION STATUS CHECK + if chaosDetails.DefaultHealthCheck { + log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)") + if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { + log.Errorf("Application status check failed, err: %v", err) + types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) + events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) + return + } + } + + if experimentsDetails.EngineName != "" { + // marking AUT as running, as we already checked the status of application under test + msg := "AUT: Running" + + // run the probes in the pre-chaos check + if len(resultDetails.ProbeDetails) != 0 { + + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + log.Errorf("Probe Failed, err: %v", err) + msg := "AUT: Running, Probes: Unsuccessful" + types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) + events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) + return + } + msg = "AUT: Running, Probes: Successful" + } + // generating the events for the pre-chaos check + types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails) + events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + } + chaosDetails.Phase = types.ChaosInjectPhase + if err := litmusLIB.PrepareChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + log.Errorf("Chaos injection failed, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, 
&eventsDetails) + return + } + log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName) + resultDetails.Verdict = v1alpha1.ResultVerdictPassed + chaosDetails.Phase = types.PostChaosPhase + + //POST-CHAOS APPLICATION STATUS CHECK + if chaosDetails.DefaultHealthCheck { + log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)") + if err := status.AUTStatusCheck(clients, &chaosDetails); err != nil { + log.Errorf("Application status check failed, err: %v", err) + types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, "AUT: Not Running", "Warning", &chaosDetails) + events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) + return + } + } + + if experimentsDetails.EngineName != "" { + // marking AUT as running, as we already checked the status of application under test + msg := "AUT: Running" + + // run the probes in the post-chaos check + if len(resultDetails.ProbeDetails) != 0 { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + log.Errorf("Probes Failed, err: %v", err) + msg := "AUT: Running, Probes: Unsuccessful" + types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) + events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) + return + } + msg = "AUT: Running, Probes: Successful" + } + + // generating post chaos event + types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails) + events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + } + + //Updating the chaosResult in the end of experiment + log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName) + if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil { + log.Errorf("Unable to Update the Chaos Result, err: %v", err) + result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) + return + } + + // generating the event in chaosresult to mark the verdict as pass/fail + msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict) + reason, eventType := types.GetChaosResultVerdictEvent(resultDetails.Verdict) + types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails) + events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult") + + if experimentsDetails.EngineName != "" { + msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed" + types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails) + events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine") + } +} diff --git a/experiments/load/k6-loadgen/rbac.yaml b/experiments/load/k6-loadgen/rbac.yaml new file mode 100644 index 000000000..25095e4ff --- /dev/null +++ b/experiments/load/k6-loadgen/rbac.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: k6-loadgen-sa + namespace: default + labels: + name: k6-loadgen-sa +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: k6-loadgen-sa + namespace: default + labels: + name: k6-loadgen-sa +rules: +- apiGroups: 
["","litmuschaos.io","batch","apps"] + resources: ["pods","configmaps","jobs","pods/exec","pods/log","events","chaosengines","chaosexperiments","chaosresults"] + verbs: ["create","list","get","patch","update","delete","deletecollection"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: k6-loadgen-sa + namespace: default + labels: + name: k6-loadgen-sa +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: k6-loadgen-sa +subjects: +- kind: ServiceAccount + name: k6-loadgen-sa + namespace: default + diff --git a/experiments/load/k6-loadgen/test/test.yml b/experiments/load/k6-loadgen/test/test.yml new file mode 100644 index 000000000..3abc7820e --- /dev/null +++ b/experiments/load/k6-loadgen/test/test.yml @@ -0,0 +1,61 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: litmus-experiment +spec: + replicas: 1 + selector: + matchLabels: + app: litmus-experiment + template: + metadata: + labels: + app: litmus-experiment + spec: + serviceAccountName: k6-loadgen-sa + containers: + - name: gotest + image: busybox + command: + - sleep + - "3600" + env: + # provide application namespace + - name: APP_NAMESPACE + value: '' + + # provide application labels + - name: APP_LABEL + value: '' + + # provide application kind + - name: APP_KIND + value: '' + + - name: TOTAL_CHAOS_DURATION + value: '' + + # provide auxiliary application details - namespace and labels of the applications + # sample input is - "ns1:app=percona,ns2:name=nginx" + - name: AUXILIARY_APPINFO + value: '' + + ## Period to wait before injection of chaos in sec + - name: RAMP_TIME + value: '' + + # provide the chaos namespace + - name: CHAOS_NAMESPACE + value: '' + + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + + - name: CHAOS_SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + diff --git a/experiments/spring-boot/spring-boot-faults/experiment/spring-boot-faults.go b/experiments/spring-boot/spring-boot-faults/experiment/spring-boot-faults.go index 7e3dc57e2..45d27ef62 100644 --- a/experiments/spring-boot/spring-boot-faults/experiment/spring-boot-faults.go +++ b/experiments/spring-boot/spring-boot-faults/experiment/spring-boot-faults.go @@ -1,6 +1,7 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" @@ -19,7 +20,7 @@ import ( ) // Experiment contains steps to inject chaos -func Experiment(clients clients.ClientSets, expName string) { +func Experiment(ctx context.Context, clients clients.ClientSets, expName string) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -113,7 +114,7 @@ func Experiment(clients clients.ClientSets, expName string) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -130,7 +131,7 @@ func Experiment(clients clients.ClientSets, expName string) { chaosDetails.Phase = types.ChaosInjectPhase - if err := litmusLIB.PrepareChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { + if err := litmusLIB.PrepareChaos(ctx, 
&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil { log.Errorf("Chaos injection failed, err: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -159,7 +160,7 @@ func Experiment(clients clients.ClientSets, expName string) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err := probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed, err: %v", err) msg := "AUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/experiments/vmware/vm-poweroff/experiment/vm-poweroff.go b/experiments/vmware/vm-poweroff/experiment/vm-poweroff.go index bee4d4193..551d717d0 100644 --- a/experiments/vmware/vm-poweroff/experiment/vm-poweroff.go +++ b/experiments/vmware/vm-poweroff/experiment/vm-poweroff.go @@ -1,11 +1,12 @@ package experiment import ( + "context" "os" "github.com/litmuschaos/chaos-operator/api/litmuschaos/v1alpha1" litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/vm-poweroff/lib" - clients "github.com/litmuschaos/litmus-go/pkg/clients" + "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/cloud/vmware" "github.com/litmuschaos/litmus-go/pkg/events" "github.com/litmuschaos/litmus-go/pkg/log" @@ -22,7 +23,7 @@ import ( var err error // VMPoweroff contains steps to inject vm-power-off chaos -func VMPoweroff(clients clients.ClientSets) { +func VMPoweroff(ctx context.Context, clients clients.ClientSets) { experimentsDetails := experimentTypes.ExperimentDetails{} resultDetails := types.ResultDetails{} @@ -110,7 +111,7 @@ func VMPoweroff(clients clients.ClientSets) { // run the probes in the pre-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { + if err = probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil { log.Errorf("Probe Failed: %v", err) msg := "IUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails) @@ -131,7 +132,7 @@ func VMPoweroff(clients clients.ClientSets) { chaosDetails.Phase = types.ChaosInjectPhase - if err = litmusLIB.InjectVMPowerOffChaos(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails, cookie); err != nil { + if err = litmusLIB.InjectVMPowerOffChaos(ctx, &experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails, cookie); err != nil { log.Errorf("Chaos injection failed: %v", err) result.RecordAfterFailure(&chaosDetails, &resultDetails, err, clients, &eventsDetails) return @@ -159,7 +160,7 @@ func VMPoweroff(clients clients.ClientSets) { // run the probes in the post-chaos check if len(resultDetails.ProbeDetails) != 0 { - if err = probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { + if err = probe.RunProbes(ctx, &chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil { log.Errorf("Probes Failed: %v", err) msg := "IUT: Running, Probes: Unsuccessful" types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails) diff --git a/go.mod b/go.mod index 
ae41927da..e851f0a5f 100644 --- a/go.mod +++ b/go.mod @@ -1,52 +1,66 @@ module github.com/litmuschaos/litmus-go -go 1.18 +go 1.22.0 require ( - github.com/Azure/azure-sdk-for-go v56.1.0+incompatible - github.com/Azure/go-autorest/autorest v0.11.18 + github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 + github.com/Azure/azure-sdk-for-go v68.0.0+incompatible + github.com/Azure/go-autorest/autorest v0.11.29 github.com/Azure/go-autorest/autorest/azure/auth v0.5.7 github.com/aws/aws-sdk-go v1.38.59 github.com/containerd/cgroups v1.0.1 github.com/kyokomi/emoji v2.2.4+incompatible - github.com/litmuschaos/chaos-operator v0.0.0-20240104104915-2d8472873222 + github.com/litmuschaos/chaos-operator v0.0.0-20240301085554-ba4d2f704cfa github.com/palantir/stacktrace v0.0.0-20161112013806-78658fd2d177 github.com/pkg/errors v0.9.1 github.com/sirupsen/logrus v1.8.1 github.com/spf13/cobra v1.1.1 - google.golang.org/api v0.48.0 + github.com/stretchr/testify v1.9.0 + go.opentelemetry.io/otel v1.27.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 + go.opentelemetry.io/otel/sdk v1.27.0 + go.opentelemetry.io/otel/trace v1.27.0 + google.golang.org/api v0.169.0 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.26.0 - k8s.io/apimachinery v0.26.0 + k8s.io/api v0.30.1 + k8s.io/apimachinery v0.30.1 k8s.io/client-go v12.0.0+incompatible k8s.io/klog v1.0.0 ) require ( - cloud.google.com/go v0.83.0 // indirect + cloud.google.com/go/compute/metadata v0.3.0 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.22 // indirect github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/to v0.3.1-0.20191028180845-3492b2aff503 // indirect github.com/Azure/go-autorest/autorest/validation v0.2.1-0.20191028180845-3492b2aff503 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cilium/ebpf v0.6.2 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect github.com/docker/go-units v0.4.0 // indirect - github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/godbus/dbus/v5 v5.0.4 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/google/go-cmp v0.5.6 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.1.0 // indirect - github.com/googleapis/gax-go/v2 v2.0.5 // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/gax-go/v2 v2.12.2 // indirect github.com/googleapis/gnostic v0.5.5 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect 
github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect @@ -56,19 +70,23 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - go.opencensus.io v0.23.0 // indirect - golang.org/x/crypto v0.16.0 // indirect - golang.org/x/net v0.19.0 // indirect - golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c // indirect - golang.org/x/sys v0.15.0 // indirect - golang.org/x/term v0.15.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08 // indirect - google.golang.org/grpc v1.38.0 // indirect - google.golang.org/protobuf v1.26.0 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel/metric v1.27.0 // indirect + go.opentelemetry.io/proto/otlp v1.2.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/oauth2 v0.20.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/term v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect + golang.org/x/time v0.5.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 // indirect + google.golang.org/grpc v1.64.0 // indirect + google.golang.org/protobuf v1.34.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/klog/v2 v2.80.1 // indirect diff --git a/go.sum b/go.sum index fb2cb5834..9246c2381 100644 --- a/go.sum +++ b/go.sum @@ -9,51 +9,38 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0 h1:bAMqZidYkmIsUqe6PtkEPT7Q+vfizScn+jfNA6jwK9c= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery 
v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go v56.1.0+incompatible h1:Ofcecdw3F1ZqnpDEZcLzH9Hq0P4Y5Si8+EioXJSamJs= -github.com/Azure/azure-sdk-for-go v56.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= -github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= +github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= 
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk= -github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.22 h1:/GblQdIudfEM3AWWZ0mrYJQSd7JS4S/Mbzh6F0ov0Xc= +github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= github.com/Azure/go-autorest/autorest/azure/auth v0.5.7 h1:8DQB8yl7aLQuP+nuR5e2RO6454OvFlSTXXaNHshc16s= github.com/Azure/go-autorest/autorest/azure/auth v0.5.7/go.mod h1:AkzUsqkrdmNhfP2i54HqINVQopw0CLDnvHpJ88Zz1eI= github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 h1:dMOmEJfkLKW/7JsokJqkyoYSgmR08hi9KrhjZb+JALY= @@ -63,8 +50,9 @@ github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8K github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= +github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= github.com/Azure/go-autorest/autorest/to v0.3.1-0.20191028180845-3492b2aff503 h1:2McfZNaDqGPjv2pddK547PENIk4HV+NT7gvqRq4L0us= github.com/Azure/go-autorest/autorest/to v0.3.1-0.20191028180845-3492b2aff503/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= github.com/Azure/go-autorest/autorest/validation v0.2.1-0.20191028180845-3492b2aff503 h1:RBrGlrkPWapMcLp1M6ywCqyYKOAT5ERI6lYFvGKOThE= @@ -101,6 +89,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -112,8 +102,6 @@ github.com/cilium/ebpf v0.6.2 h1:iHsfF/t4aW4heW2YKfeHrVPGdtYTL4C4KocpM8KTSnI= github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= 
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= @@ -154,17 +142,14 @@ github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -184,8 +169,11 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-openapi/jsonpointer 
v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -204,6 +192,9 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -217,15 +208,11 @@ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -235,51 +222,44 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree 
v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod 
h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA= +github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= @@ -293,6 +273,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -315,7 +297,6 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= @@ -347,8 +328,9 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -356,8 +338,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kyokomi/emoji v2.2.4+incompatible h1:np0woGKwx9LiHAQmwZx79Oc0rHpNw3o+3evou4BEPv4= github.com/kyokomi/emoji v2.2.4+incompatible/go.mod h1:mZ6aGCD7yk8j6QY6KICwnZ2pxoszVseX1DNoGtU2tBA= -github.com/litmuschaos/chaos-operator v0.0.0-20240104104915-2d8472873222 h1:e7QsO2cL0/aMlCNM2BRRg2gfv05C9ZeVF2U0BE7IPcE= -github.com/litmuschaos/chaos-operator v0.0.0-20240104104915-2d8472873222/go.mod h1:yDZVtAgRVgoQtf8tSN58tpus0kGFFJXTj/bppJCRrdo= +github.com/litmuschaos/chaos-operator v0.0.0-20240301085554-ba4d2f704cfa h1:Avbgl6Pcqm2yfpAHOD3Cd5x2KnMPv+HJkE9e6I4oo5k= +github.com/litmuschaos/chaos-operator v0.0.0-20240301085554-ba4d2f704cfa/go.mod h1:yDZVtAgRVgoQtf8tSN58tpus0kGFFJXTj/bppJCRrdo= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -450,6 +432,8 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= @@ -481,24 +465,28 @@ github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= @@ -507,14 +495,30 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= 
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/sdk v1.27.0 h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= +go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= +go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -530,8 +534,11 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -554,8 +561,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint 
v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -565,9 +570,7 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -584,7 +587,6 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -595,51 +597,36 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= +golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -673,59 +660,50 @@ golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text 
v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -761,33 +739,16 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= @@ -799,28 +760,14 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod 
h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0 h1:RDAPWfNFY06dffEXfn7hZF5Fr1ZbnChzfQZAPyBd1+I= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.169.0 h1:QwWPy71FgMWqJN/l6jVlFHUa29a7dcUy02I8o799nPY= +google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -839,34 +786,14 @@ google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto 
v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08 h1:pc16UedxnxXXtGxHCSUhafAoVHQZ0yXl8ZelMH4EETc= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 h1:P8OJ/WCl/Xo4E4zoe4/bifHpSmmKwARqyqE4nW6J2GQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 h1:AgADTJarZTBqgjiUzRgfaBchgYB3/WFTC80GPwsMcRI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -875,21 +802,9 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= 
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -901,14 +816,16 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -942,7 +859,6 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.21.2 h1:vz7DqmRsXTCSa6pNxXwQ1IYeAZgdIsua+DZU+o+SX3Y= k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU= k8s.io/apiextensions-apiserver v0.21.2/go.mod h1:+Axoz5/l3AYpGLlhJDfcVQzCerVYq3K3CvDMvw6X1RA= diff --git a/pkg/baremetal/redfish-node-restart/environment/environment.go b/pkg/baremetal/redfish-node-restart/environment/environment.go index bafe960ae..90585cc9c 100644 --- a/pkg/baremetal/redfish-node-restart/environment/environment.go +++ b/pkg/baremetal/redfish-node-restart/environment/environment.go @@ -9,7 +9,7 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "") experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus") diff --git a/pkg/cassandra/pod-delete/environment/environment.go b/pkg/cassandra/pod-delete/environment/environment.go index b081cc714..72d264ce3 100644 --- a/pkg/cassandra/pod-delete/environment/environment.go +++ b/pkg/cassandra/pod-delete/environment/environment.go @@ -9,7 +9,7 @@ import ( clientTypes "k8s.io/apimachinery/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(cassandraDetails *cassandraTypes.ExperimentDetails) { var ChaoslibDetail exp.ExperimentDetails diff --git a/pkg/cloud/aws/common/common.go b/pkg/cloud/aws/common/common.go index 5d58e8558..ec76241f1 100644 --- a/pkg/cloud/aws/common/common.go +++ b/pkg/cloud/aws/common/common.go @@ -7,7 +7,7 @@ import ( "github.com/pkg/errors" ) -//GetAWSSession will return the aws session for a given region +// GetAWSSession will return the aws session for a given region func GetAWSSession(region string) *session.Session { return session.Must(session.NewSessionWithOptions(session.Options{ SharedConfigState: session.SharedConfigEnable, @@ -15,7 +15,7 @@ func GetAWSSession(region string) *session.Session { })) } -//CheckAWSError will return the aws errors +// CheckAWSError will return the aws errors func CheckAWSError(err error) error { if aerr, ok := err.(awserr.Error); ok { switch aerr.Code() { diff --git a/pkg/events/event.go b/pkg/events/event.go index 70c1b1c2f..21b9b0957 100644 --- a/pkg/events/event.go +++ b/pkg/events/event.go @@ -11,7 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -//CreateEvents create the events in the desired resource +// CreateEvents create the events in the desired resource func CreateEvents(eventsDetails *types.EventDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, kind, eventName string) error { events := &apiv1.Event{ ObjectMeta: metav1.ObjectMeta{ @@ -40,7 +40,7 @@ func CreateEvents(eventsDetails *types.EventDetails, clients clients.ClientSets, return err } -//GenerateEvents update the events and increase the count by 1, if already present +// GenerateEvents update the events and increase the count by 1, if already present // else it will create a new event func GenerateEvents(eventsDetails *types.EventDetails, clients clients.ClientSets, chaosDetails *types.ChaosDetails, kind string) error { diff --git a/pkg/generic/docker-service-kill/environment/environment.go b/pkg/generic/docker-service-kill/environment/environment.go index 6e360d9ed..f515b7a21 100644 --- 
a/pkg/generic/docker-service-kill/environment/environment.go +++ b/pkg/generic/docker-service-kill/environment/environment.go @@ -9,7 +9,7 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "docker-service-kill") experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus") diff --git a/pkg/generic/kubelet-service-kill/environment/environment.go b/pkg/generic/kubelet-service-kill/environment/environment.go index 6d524fa49..c9eed0b1b 100644 --- a/pkg/generic/kubelet-service-kill/environment/environment.go +++ b/pkg/generic/kubelet-service-kill/environment/environment.go @@ -9,7 +9,7 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "kubelet-service-kill") experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus") diff --git a/pkg/generic/node-cpu-hog/environment/environment.go b/pkg/generic/node-cpu-hog/environment/environment.go index 75e0b6d9f..644021bca 100644 --- a/pkg/generic/node-cpu-hog/environment/environment.go +++ b/pkg/generic/node-cpu-hog/environment/environment.go @@ -8,7 +8,7 @@ import ( clientTypes "k8s.io/apimachinery/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "node-cpu-hog") experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus") diff --git a/pkg/generic/node-drain/environment/environment.go b/pkg/generic/node-drain/environment/environment.go index 46065ba1e..5d7707d7d 100644 --- a/pkg/generic/node-drain/environment/environment.go +++ b/pkg/generic/node-drain/environment/environment.go @@ -9,7 +9,7 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "node-drain") experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus") diff --git a/pkg/generic/node-io-stress/environment/environment.go b/pkg/generic/node-io-stress/environment/environment.go index ad229c360..dac8033ab 100644 --- a/pkg/generic/node-io-stress/environment/environment.go +++ b/pkg/generic/node-io-stress/environment/environment.go @@ -8,7 +8,7 @@ import ( clientTypes "k8s.io/apimachinery/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "node-io-stress") experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus") diff --git a/pkg/generic/node-memory-hog/environment/environment.go b/pkg/generic/node-memory-hog/environment/environment.go index d914351a5..edae0f468 100644 --- 
a/pkg/generic/node-memory-hog/environment/environment.go +++ b/pkg/generic/node-memory-hog/environment/environment.go @@ -8,7 +8,7 @@ import ( clientTypes "k8s.io/apimachinery/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "node-memory-hog") experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus") diff --git a/pkg/generic/node-restart/environment/environment.go b/pkg/generic/node-restart/environment/environment.go index 54fb4222c..e86a6cc23 100644 --- a/pkg/generic/node-restart/environment/environment.go +++ b/pkg/generic/node-restart/environment/environment.go @@ -8,7 +8,7 @@ import ( clientTypes "k8s.io/apimachinery/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "node-restart") experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus") diff --git a/pkg/generic/node-taint/environment/environment.go b/pkg/generic/node-taint/environment/environment.go index c04183c92..5531525fc 100644 --- a/pkg/generic/node-taint/environment/environment.go +++ b/pkg/generic/node-taint/environment/environment.go @@ -9,7 +9,7 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "node-taint") experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus") diff --git a/pkg/generic/pod-autoscaler/environment/environment.go b/pkg/generic/pod-autoscaler/environment/environment.go index 1d80d5f2c..cdd3c9cb0 100644 --- a/pkg/generic/pod-autoscaler/environment/environment.go +++ b/pkg/generic/pod-autoscaler/environment/environment.go @@ -8,7 +8,7 @@ import ( clientTypes "k8s.io/apimachinery/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "pod-autoscaler") diff --git a/pkg/generic/pod-cpu-hog-exec/environment/environment.go b/pkg/generic/pod-cpu-hog-exec/environment/environment.go index 3619db2bd..397a39a1b 100644 --- a/pkg/generic/pod-cpu-hog-exec/environment/environment.go +++ b/pkg/generic/pod-cpu-hog-exec/environment/environment.go @@ -8,7 +8,7 @@ import ( clientTypes "k8s.io/apimachinery/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "pod-cpu-hog") experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus") diff --git a/pkg/generic/pod-delete/environment/environment.go b/pkg/generic/pod-delete/environment/environment.go index dce477981..d2f0cefa1 100644 --- a/pkg/generic/pod-delete/environment/environment.go +++ b/pkg/generic/pod-delete/environment/environment.go @@ -9,7 +9,7 @@ import ( 
"github.com/litmuschaos/litmus-go/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "pod-delete") experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus") diff --git a/pkg/generic/pod-fio-stress/environment/environment.go b/pkg/generic/pod-fio-stress/environment/environment.go index 9c2140c64..41f257563 100644 --- a/pkg/generic/pod-fio-stress/environment/environment.go +++ b/pkg/generic/pod-fio-stress/environment/environment.go @@ -9,7 +9,7 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "") experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus") diff --git a/pkg/generic/pod-memory-hog-exec/environment/environment.go b/pkg/generic/pod-memory-hog-exec/environment/environment.go index 4cfc97834..a325eedfb 100644 --- a/pkg/generic/pod-memory-hog-exec/environment/environment.go +++ b/pkg/generic/pod-memory-hog-exec/environment/environment.go @@ -8,7 +8,7 @@ import ( clientTypes "k8s.io/apimachinery/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "pod-memory-hog") experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus") diff --git a/pkg/generic/pod-network-partition/environment/environment.go b/pkg/generic/pod-network-partition/environment/environment.go index a6b456696..50233b245 100644 --- a/pkg/generic/pod-network-partition/environment/environment.go +++ b/pkg/generic/pod-network-partition/environment/environment.go @@ -9,7 +9,7 @@ import ( "github.com/litmuschaos/litmus-go/pkg/types" ) -//GetENV fetches all the env variables from the runner pod +// GetENV fetches all the env variables from the runner pod func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "pod-network-partition") experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus") diff --git a/pkg/kube-aws/ec2-terminate-by-tag/environment/environment.go b/pkg/kube-aws/ec2-terminate-by-tag/environment/environment.go index 3c40d710b..3fd69b97f 100644 --- a/pkg/kube-aws/ec2-terminate-by-tag/environment/environment.go +++ b/pkg/kube-aws/ec2-terminate-by-tag/environment/environment.go @@ -25,7 +25,7 @@ func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { experimentDetails.Timeout, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_TIMEOUT", "180")) experimentDetails.Region = types.Getenv("REGION", "") experimentDetails.ManagedNodegroup = types.Getenv("MANAGED_NODEGROUP", "disable") - experimentDetails.InstanceTag = strings.TrimSpace(types.Getenv("INSTANCE_TAG", "")) + experimentDetails.Ec2InstanceTag = strings.TrimSpace(types.Getenv("EC2_INSTANCE_TAG", "")) experimentDetails.InstanceAffectedPerc, _ = strconv.Atoi(types.Getenv("INSTANCE_AFFECTED_PERC", "0")) experimentDetails.Sequence = types.Getenv("SEQUENCE", "parallel") } diff --git 
a/pkg/kube-aws/ec2-terminate-by-tag/types/types.go b/pkg/kube-aws/ec2-terminate-by-tag/types/types.go index 972aa4b4e..e6d7d45e7 100644 --- a/pkg/kube-aws/ec2-terminate-by-tag/types/types.go +++ b/pkg/kube-aws/ec2-terminate-by-tag/types/types.go @@ -17,7 +17,7 @@ type ExperimentDetails struct { ChaosPodName string Timeout int Delay int - InstanceTag string + Ec2InstanceTag string Region string InstanceAffectedPerc int ManagedNodegroup string diff --git a/pkg/load/k6-loadgen/environment/environment.go b/pkg/load/k6-loadgen/environment/environment.go new file mode 100644 index 000000000..aa7507f16 --- /dev/null +++ b/pkg/load/k6-loadgen/environment/environment.go @@ -0,0 +1,29 @@ +package environment + +import ( + "strconv" + + experimentTypes "github.com/litmuschaos/litmus-go/pkg/load/k6-loadgen/types" + "github.com/litmuschaos/litmus-go/pkg/types" +) + +// GetENV fetches all the env variables from the runner pod +func GetENV(experimentDetails *experimentTypes.ExperimentDetails) { + experimentDetails.ExperimentName = types.Getenv("EXPERIMENT_NAME", "") + experimentDetails.ChaosNamespace = types.Getenv("CHAOS_NAMESPACE", "litmus") + experimentDetails.EngineName = types.Getenv("CHAOSENGINE", "") + experimentDetails.ChaosDuration, _ = strconv.Atoi(types.Getenv("TOTAL_CHAOS_DURATION", "30")) + experimentDetails.ChaosInterval, _ = strconv.Atoi(types.Getenv("CHAOS_INTERVAL", "10")) + experimentDetails.RampTime, _ = strconv.Atoi(types.Getenv("RAMP_TIME", "0")) + experimentDetails.AppNS = types.Getenv("APP_NAMESPACE", "") + experimentDetails.AppLabel = types.Getenv("APP_LABEL", "") + experimentDetails.AppKind = types.Getenv("APP_KIND", "") + experimentDetails.Delay, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_DELAY", "2")) + experimentDetails.Timeout, _ = strconv.Atoi(types.Getenv("STATUS_CHECK_TIMEOUT", "180")) + experimentDetails.PodsAffectedPerc, _ = strconv.Atoi(types.Getenv("PODS_AFFECTED_PERC", "0")) + experimentDetails.LIBImagePullPolicy = types.Getenv("LIB_IMAGE_PULL_POLICY", "Always") + experimentDetails.LIBImage = types.Getenv("LIB_IMAGE", "ghcr.io/grafana/k6-operator:latest-runner") + experimentDetails.ScriptSecretName = types.Getenv("SCRIPT_SECRET_NAME", "k6-script") + experimentDetails.ScriptSecretKey = types.Getenv("SCRIPT_SECRET_KEY", "script.js") + experimentDetails.OTELMetricPrefix = types.Getenv("OTEL_METRIC_PREFIX", "k6_") +} diff --git a/pkg/load/k6-loadgen/types/types.go b/pkg/load/k6-loadgen/types/types.go new file mode 100644 index 000000000..9b6d6877f --- /dev/null +++ b/pkg/load/k6-loadgen/types/types.go @@ -0,0 +1,22 @@ +package types + +// ExperimentDetails is for collecting all the experiment-related details +type ExperimentDetails struct { + ExperimentName string + EngineName string + ChaosDuration int + ChaosInterval int + RampTime int + AppNS string + AppLabel string + AppKind string + ChaosNamespace string + Timeout int + Delay int + PodsAffectedPerc int + LIBImagePullPolicy string + LIBImage string + ScriptSecretName string + ScriptSecretKey string + OTELMetricPrefix string +} diff --git a/pkg/log/log.go b/pkg/log/log.go index caf36024e..cfc75e1fc 100644 --- a/pkg/log/log.go +++ b/pkg/log/log.go @@ -4,24 +4,24 @@ import ( logrus "github.com/sirupsen/logrus" ) -//Fatalf Logs first and then calls `logger.Exit(1)` +// Fatalf Logs first and then calls `logger.Exit(1)` // logging level is set to Panic. func Fatalf(msg string, err ...interface{}) { logrus.WithFields(logrus.Fields{}).Fatalf(msg, err...) 
} -//Fatal Logs first and then calls `logger.Exit(1)` +// Fatal Logs first and then calls `logger.Exit(1)` // logging level is set to Panic. func Fatal(msg string) { logrus.WithFields(logrus.Fields{}).Fatal(msg) } -//Infof log the General operational entries about what's going on inside the application +// Infof log the General operational entries about what's going on inside the application func Infof(msg string, val ...interface{}) { logrus.WithFields(logrus.Fields{}).Infof(msg, val...) } -//Info log the General operational entries about what's going on inside the application +// Info log the General operational entries about what's going on inside the application func Info(msg string) { logrus.WithFields(logrus.Fields{}).Infof(msg) } @@ -38,23 +38,23 @@ func ErrorWithValues(msg string, val map[string]interface{}) { logrus.WithFields(val).Error(msg) } -//Warn log the Non-critical entries that deserve eyes. +// Warn log the Non-critical entries that deserve eyes. func Warn(msg string) { logrus.WithFields(logrus.Fields{}).Warn(msg) } -//Warnf log the Non-critical entries that deserve eyes. +// Warnf log the Non-critical entries that deserve eyes. func Warnf(msg string, val ...interface{}) { logrus.WithFields(logrus.Fields{}).Warnf(msg, val...) } -//Errorf used for errors that should definitely be noted. +// Errorf used for errors that should definitely be noted. // Commonly used for hooks to send errors to an error tracking service. func Errorf(msg string, err ...interface{}) { logrus.WithFields(logrus.Fields{}).Errorf(msg, err...) } -//Error used for errors that should definitely be noted. +// Error used for errors that should definitely be noted. // Commonly used for hooks to send errors to an error tracking service func Error(msg string) { logrus.WithFields(logrus.Fields{}).Error(msg) diff --git a/pkg/math/math.go b/pkg/math/math.go index 30a94f09f..26b18ef26 100644 --- a/pkg/math/math.go +++ b/pkg/math/math.go @@ -8,7 +8,7 @@ func Maximum(a int, b int) int { return b } -//Minimum calculates the minimum value among two integers +// Minimum calculates the minimum value among two integers func Minimum(a int, b int) int { if a > b { return b @@ -16,7 +16,7 @@ func Minimum(a int, b int) int { return a } -//Adjustment contains rule of three for calculating an integer given another integer representing a percentage +// Adjustment contains rule of three for calculating an integer given another integer representing a percentage func Adjustment(a int, b int) int { return (a * b / 100) } diff --git a/pkg/probe/cmdprobe.go b/pkg/probe/cmdprobe.go index 6d26b6a30..bd3f82aa8 100644 --- a/pkg/probe/cmdprobe.go +++ b/pkg/probe/cmdprobe.go @@ -179,6 +179,7 @@ func createProbePod(clients clients.ClientSets, chaosDetails *types.ChaosDetails ServiceAccountName: svcAccount, Volumes: volume, NodeSelector: source.NodeSelector, + Tolerations: source.Tolerations, ImagePullSecrets: source.ImagePullSecrets, Containers: []apiv1.Container{ { @@ -351,6 +352,7 @@ loop: for index := range chaosresult.ProbeDetails { if chaosresult.ProbeDetails[index].Name == probe.Name { chaosresult.ProbeDetails[index].IsProbeFailedWithError = err + chaosresult.ProbeDetails[index].HasProbeCompleted = true chaosresult.ProbeDetails[index].Status.Description = getDescription(err) log.Errorf("The %v cmd probe has been Failed, err: %v", probe.Name, err) isExperimentFailed = true @@ -409,6 +411,7 @@ loop: for index := range chaosresult.ProbeDetails { if chaosresult.ProbeDetails[index].Name == probe.Name { 
chaosresult.ProbeDetails[index].IsProbeFailedWithError = err + chaosresult.ProbeDetails[index].HasProbeCompleted = true chaosresult.ProbeDetails[index].Status.Description = getDescription(err) log.Errorf("The %v cmd probe has been Failed, err: %v", probe.Name, err) isExperimentFailed = true @@ -477,6 +480,7 @@ loop: for index := range chaosresult.ProbeDetails { if chaosresult.ProbeDetails[index].Name == probe.Name { chaosresult.ProbeDetails[index].IsProbeFailedWithError = err + chaosresult.ProbeDetails[index].HasProbeCompleted = true chaosresult.ProbeDetails[index].Status.Description = getDescription(err) log.Errorf("The %v cmd probe has been Failed, err: %v", probe.Name, err) isExperimentFailed = true @@ -545,6 +549,7 @@ loop: for index := range chaosresult.ProbeDetails { if chaosresult.ProbeDetails[index].Name == probe.Name { chaosresult.ProbeDetails[index].IsProbeFailedWithError = err + chaosresult.ProbeDetails[index].HasProbeCompleted = true chaosresult.ProbeDetails[index].Status.Description = getDescription(err) log.Errorf("The %v cmd probe has been Failed, err: %v", probe.Name, err) isExperimentFailed = true diff --git a/pkg/probe/httpprobe.go b/pkg/probe/httpprobe.go index 8412adeae..9a87c67b7 100644 --- a/pkg/probe/httpprobe.go +++ b/pkg/probe/httpprobe.go @@ -3,6 +3,7 @@ package probe import ( "bytes" "fmt" + "github.com/litmuschaos/litmus-go/pkg/utils" "os/exec" "reflect" "strconv" @@ -115,6 +116,9 @@ func httpGet(probe v1alpha1.ProbeAttributes, client *http.Client, resultDetails // getting the response from the given url resp, err := client.Get(probe.HTTPProbeInputs.URL) if err != nil { + if utils.HttpTimeout(err) { + return cerrors.Error{ErrorCode: cerrors.FailureTypeHttpProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: err.Error()} + } return cerrors.Error{ErrorCode: cerrors.ErrorTypeHttpProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: err.Error()} } @@ -159,6 +163,9 @@ func httpPost(probe v1alpha1.ProbeAttributes, client *http.Client, resultDetails Try(func(attempt uint) error { resp, err := client.Post(probe.HTTPProbeInputs.URL, probe.HTTPProbeInputs.Method.Post.ContentType, strings.NewReader(body)) if err != nil { + if utils.HttpTimeout(err) { + return cerrors.Error{ErrorCode: cerrors.FailureTypeHttpProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: err.Error()} + } return cerrors.Error{ErrorCode: cerrors.ErrorTypeHttpProbe, Target: fmt.Sprintf("{name: %v}", probe.Name), Reason: err.Error()} } code := strconv.Itoa(resp.StatusCode) @@ -244,6 +251,7 @@ loop: for index := range chaosresult.ProbeDetails { if chaosresult.ProbeDetails[index].Name == probe.Name { chaosresult.ProbeDetails[index].IsProbeFailedWithError = err + chaosresult.ProbeDetails[index].HasProbeCompleted = true chaosresult.ProbeDetails[index].Status.Description = getDescription(err) log.Errorf("The %v http probe has been Failed, err: %v", probe.Name, err) isExperimentFailed = true @@ -395,6 +403,7 @@ loop: for index := range chaosresult.ProbeDetails { if chaosresult.ProbeDetails[index].Name == probe.Name { chaosresult.ProbeDetails[index].IsProbeFailedWithError = err + chaosresult.ProbeDetails[index].HasProbeCompleted = true chaosresult.ProbeDetails[index].Status.Description = getDescription(err) isExperimentFailed = true break loop diff --git a/pkg/probe/k8sprobe.go b/pkg/probe/k8sprobe.go index aa33eaeb8..8e07cf570 100644 --- a/pkg/probe/k8sprobe.go +++ b/pkg/probe/k8sprobe.go @@ -156,6 +156,7 @@ loop: for index := range chaosresult.ProbeDetails { if 
chaosresult.ProbeDetails[index].Name == probe.Name { chaosresult.ProbeDetails[index].IsProbeFailedWithError = err + chaosresult.ProbeDetails[index].HasProbeCompleted = true chaosresult.ProbeDetails[index].Status.Description = getDescription(err) log.Errorf("the %v k8s probe has been Failed, err: %v", probe.Name, err) isExperimentFailed = true @@ -435,6 +436,7 @@ loop: for index := range chaosresult.ProbeDetails { if chaosresult.ProbeDetails[index].Name == probe.Name { chaosresult.ProbeDetails[index].IsProbeFailedWithError = err + chaosresult.ProbeDetails[index].HasProbeCompleted = true chaosresult.ProbeDetails[index].Status.Description = getDescription(err) log.Errorf("The %v k8s probe has been Failed, err: %v", probe.Name, err) isExperimentFailed = true diff --git a/pkg/probe/probe.go b/pkg/probe/probe.go index 909bdaeca..fe6e1a271 100644 --- a/pkg/probe/probe.go +++ b/pkg/probe/probe.go @@ -13,9 +13,11 @@ import ( "github.com/litmuschaos/litmus-go/pkg/cerrors" "github.com/litmuschaos/litmus-go/pkg/clients" "github.com/litmuschaos/litmus-go/pkg/log" + "github.com/litmuschaos/litmus-go/pkg/telemetry" "github.com/litmuschaos/litmus-go/pkg/types" "github.com/palantir/stacktrace" "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -23,7 +25,9 @@ var err error // RunProbes contains the steps to trigger the probes // It contains steps to trigger all three probes: k8sprobe, httpprobe, cmdprobe -func RunProbes(chaosDetails *types.ChaosDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, phase string, eventsDetails *types.EventDetails) error { +func RunProbes(ctx context.Context, chaosDetails *types.ChaosDetails, clients clients.ClientSets, resultDetails *types.ResultDetails, phase string, eventsDetails *types.EventDetails) error { + ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "RunProbes") + defer span.End() // get the probes details from the chaosengine probes, err := getProbesFromChaosEngine(chaosDetails, clients) diff --git a/pkg/probe/promProbe.go b/pkg/probe/promProbe.go index 4e02aa7fc..c80b8b1f5 100644 --- a/pkg/probe/promProbe.go +++ b/pkg/probe/promProbe.go @@ -262,6 +262,7 @@ loop: for index := range chaosresult.ProbeDetails { if chaosresult.ProbeDetails[index].Name == probe.Name { chaosresult.ProbeDetails[index].IsProbeFailedWithError = err + chaosresult.ProbeDetails[index].HasProbeCompleted = true chaosresult.ProbeDetails[index].Status.Description = getDescription(err) log.Errorf("The %v prom probe has been Failed, err: %v", probe.Name, err) isExperimentFailed = true @@ -319,6 +320,7 @@ loop: for index := range chaosresult.ProbeDetails { if chaosresult.ProbeDetails[index].Name == probe.Name { chaosresult.ProbeDetails[index].IsProbeFailedWithError = err + chaosresult.ProbeDetails[index].HasProbeCompleted = true chaosresult.ProbeDetails[index].Status.Description = getDescription(err) log.Errorf("The %v prom probe has been Failed, err: %v", probe.Name, err) isExperimentFailed = true diff --git a/pkg/telemetry/otel.go b/pkg/telemetry/otel.go new file mode 100644 index 000000000..2b30c9fd5 --- /dev/null +++ b/pkg/telemetry/otel.go @@ -0,0 +1,91 @@ +package telemetry + +import ( + "context" + "errors" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/sdk/trace" + semconv 
"go.opentelemetry.io/otel/semconv/v1.25.0" +) + +const ( + OTELExperimentJobServiceName = "chaos_experiment_job" + OTELExperimentJobHelperServiceName = "chaos_experiment_job_helper" + OTELExporterOTLPEndpoint = "OTEL_EXPORTER_OTLP_ENDPOINT" +) + +func InitOTelSDK(ctx context.Context, isExperiment bool, endpoint string) (shutdown func(context.Context) error, err error) { + var shutdownFuncs []func(context.Context) error + + shutdown = func(ctx context.Context) error { + var err error + for _, fn := range shutdownFuncs { + err = errors.Join(err, fn(ctx)) + } + shutdownFuncs = nil + return err + } + + handleErr := func(inErr error) { + err = errors.Join(inErr, shutdown(ctx)) + } + + tracerProvider, err := newTracerProvider(ctx, isExperiment, endpoint) + if err != nil { + handleErr(err) + return + } + + prop := newPropagator() + otel.SetTextMapPropagator(prop) + + shutdownFuncs = append(shutdownFuncs, tracerProvider.Shutdown) + otel.SetTracerProvider(tracerProvider) + + // TODO: need to add metrics & logging provider + return +} + +func newPropagator() propagation.TextMapPropagator { + return propagation.NewCompositeTextMapPropagator( + propagation.TraceContext{}, + propagation.Baggage{}, + ) +} + +func newTracerProvider(ctx context.Context, isExperiment bool, endpoint string) (*trace.TracerProvider, error) { + serviceName := OTELExperimentJobHelperServiceName + if isExperiment { + serviceName = OTELExperimentJobServiceName + } + res, err := resource.New(ctx, + resource.WithAttributes( + semconv.ServiceNameKey.String(serviceName), + ), + ) + traceExporter, err := otlptrace.New( + ctx, + otlptracegrpc.NewClient( + // TODO: add secure option + otlptracegrpc.WithInsecure(), + otlptracegrpc.WithEndpoint(endpoint), + ), + ) + if err != nil { + return nil, err + } + + batchSpanProcessor := trace.NewBatchSpanProcessor(traceExporter) + tracerProvider := trace.NewTracerProvider( + trace.WithSampler(trace.AlwaysSample()), + trace.WithResource(res), + trace.WithSpanProcessor(batchSpanProcessor), + ) + + return tracerProvider, nil +} diff --git a/pkg/telemetry/tracing.go b/pkg/telemetry/tracing.go new file mode 100644 index 000000000..83c24e852 --- /dev/null +++ b/pkg/telemetry/tracing.go @@ -0,0 +1,56 @@ +package telemetry + +import ( + "context" + "encoding/json" + "os" + + "github.com/litmuschaos/litmus-go/pkg/log" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/propagation" +) + +const ( + TracerName = "litmuschaos.io/litmus-go" + TraceParent = "TRACE_PARENT" +) + +func GetTraceParentContext() context.Context { + traceParent := os.Getenv(TraceParent) + + if traceParent == "" { + return context.Background() + } + + pro := otel.GetTextMapPropagator() + carrier := make(map[string]string) + if err := json.Unmarshal([]byte(traceParent), &carrier); err != nil { + log.Fatal(err.Error()) + } + + return pro.Extract(context.Background(), propagation.MapCarrier(carrier)) +} + +// GetMarshalledSpanFromContext Extract spanContext from the context and return it as json encoded string +func GetMarshalledSpanFromContext(ctx context.Context) string { + carrier := make(map[string]string) + pro := otel.GetTextMapPropagator() + + pro.Inject(ctx, propagation.MapCarrier(carrier)) + + if len(carrier) == 0 { + log.Error("spanContext not present in the context, unable to marshall") + return "" + } + + marshalled, err := json.Marshal(carrier) + if err != nil { + log.Error(err.Error()) + return "" + } + if len(marshalled) >= 1024 { + log.Error("marshalled span context is too large, unable to marshall") + return "" + } 
+ return string(marshalled) +} diff --git a/pkg/types/types.go b/pkg/types/types.go index c83bd66ed..dc9ab2af8 100644 --- a/pkg/types/types.go +++ b/pkg/types/types.go @@ -219,6 +219,7 @@ func InitialiseChaosVariables(chaosDetails *ChaosDetails) { chaosDetails.Targets = []v1alpha1.TargetDetails{} chaosDetails.Phase = PreChaosPhase chaosDetails.ProbeContext.Ctx, chaosDetails.ProbeContext.CancelFunc = context.WithCancel(context.Background()) + chaosDetails.Labels = map[string]string{} } // SetResultAttributes initialise all the chaos result ENV diff --git a/pkg/utils/common/common.go b/pkg/utils/common/common.go index 1b1f09d8d..8759627d2 100644 --- a/pkg/utils/common/common.go +++ b/pkg/utils/common/common.go @@ -10,6 +10,7 @@ import ( "os/exec" "os/signal" "reflect" + "regexp" "strconv" "strings" "syscall" @@ -29,13 +30,17 @@ type ENVDetails struct { ENV []apiv1.EnvVar } -//WaitForDuration waits for the given time duration (in seconds) +// WaitForDuration waits for the given time duration (in seconds) func WaitForDuration(duration int) { time.Sleep(time.Duration(duration) * time.Second) } // RandomInterval wait for the random interval lies between lower & upper bounds func RandomInterval(interval string) error { + re := regexp.MustCompile(`^\d+(-\d+)?$`) + if re.MatchString(interval) == false { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: "could not parse CHAOS_INTERVAL env, bad input"} + } intervals := strings.Split(interval, "-") var lowerBound, upperBound int switch len(intervals) { @@ -49,6 +54,9 @@ func RandomInterval(interval string) error { return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: "could not parse CHAOS_INTERVAL env, invalid format"} } rand.Seed(time.Now().UnixNano()) + if upperBound < 1 { + return cerrors.Error{ErrorCode: cerrors.ErrorTypeGeneric, Reason: "invalid CHAOS_INTERVAL env value, value below lower limit"} + } waitTime := lowerBound + rand.Intn(upperBound-lowerBound) log.Infof("[Wait]: Wait for the random chaos interval %vs", waitTime) WaitForDuration(waitTime) @@ -98,7 +106,7 @@ func AbortWatcherWithoutExit(expname string, clients clients.ClientSets, resultD } } -//FilterBasedOnPercentage return the slice of list based on the the provided percentage +// FilterBasedOnPercentage return the slice of list based on the the provided percentage func FilterBasedOnPercentage(percentage int, list []string) []string { var finalList []string @@ -175,7 +183,7 @@ func GetStatusMessage(defaultCheck bool, defaultMsg, probeStatus string) string return "Probes: " + probeStatus } -//GetRandomSequence will gives a random value for sequence +// GetRandomSequence will gives a random value for sequence func GetRandomSequence(sequence string) string { if strings.ToLower(sequence) == "random" { rand.Seed(time.Now().UnixNano()) @@ -186,7 +194,7 @@ func GetRandomSequence(sequence string) string { return sequence } -//ValidateRange validates the given range of numbers +// ValidateRange validates the given range of numbers func ValidateRange(a string) string { var lb, ub int intervals := strings.Split(a, "-") @@ -204,7 +212,7 @@ func ValidateRange(a string) string { } } -//getRandomValue gives a random value between two integers +// getRandomValue gives a random value between two integers func getRandomValue(a, b int) int { rand.Seed(time.Now().Unix()) return (a + rand.Intn(b-a+1)) diff --git a/pkg/utils/common/common_fuzz_test.go b/pkg/utils/common/common_fuzz_test.go new file mode 100644 index 000000000..1c7412019 --- /dev/null +++ 
b/pkg/utils/common/common_fuzz_test.go @@ -0,0 +1,171 @@ +package common + +import ( + "regexp" + "strconv" + "strings" + "testing" + + fuzz "github.com/AdaLogics/go-fuzz-headers" + "github.com/litmuschaos/litmus-go/pkg/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func FuzzRandomInterval(f *testing.F) { + testCases := []struct { + interval string + }{ + { + interval: "13", + }, + } + + for _, tc := range testCases { + f.Add(tc.interval) + } + + f.Fuzz(func(t *testing.T, interval string) { + re := regexp.MustCompile(`^\d+(-\d+)?$`) + intervals := strings.Split(interval, "-") + skip := false + if re.MatchString(interval) { + a, _ := strconv.Atoi(intervals[0]) + if len(intervals) == 2 { + b, _ := strconv.Atoi(intervals[1]) + if a > 5 || (b-a) > 5 || (b-a) < 1 { + skip = true + } + } else if len(intervals) == 1 { + if a > 5 || a < 1 { + skip = true + } + } + } + + if !skip { + err := RandomInterval(interval) + if re.MatchString(interval) == false { + assert.Error(t, err, "{\"errorCode\":\"GENERIC_ERROR\",\"reason\":\"could not parse CHAOS_INTERVAL env, bad input\"}") + return + } + + num, _ := strconv.Atoi(intervals[0]) + if num < 1 && err != nil { + assert.Error(t, err, "{\"errorCode\":\"GENERIC_ERROR\",\"reason\":\"invalid CHAOS_INTERVAL env value, value below lower limit\"}") + return + } else if num > 1 && err != nil { + t.Errorf("Unexpected Error: %v", err) + } + } + }) +} + +func FuzzGetContainerNames(f *testing.F) { + + f.Fuzz(func(t *testing.T, data []byte) { + fuzzConsumer := fuzz.NewConsumer(data) + targetStruct := &struct { + chaosDetails types.ChaosDetails + }{} + err := fuzzConsumer.GenerateStruct(targetStruct) + if err != nil { + return + } + names := GetContainerNames(&targetStruct.chaosDetails) + require.Equal(t, len(names), len(targetStruct.chaosDetails.SideCar)+1) + }) +} + +func FuzzGetSidecarVolumes(f *testing.F) { + + f.Fuzz(func(t *testing.T, data []byte) { + fuzzConsumer := fuzz.NewConsumer(data) + targetStruct := &struct { + chaosDetails types.ChaosDetails + }{} + err := fuzzConsumer.GenerateStruct(targetStruct) + if err != nil { + return + } + volumes := GetSidecarVolumes(&targetStruct.chaosDetails) + var volCounts = 0 + for _, s := range targetStruct.chaosDetails.SideCar { + volCounts += len(s.Secrets) + } + require.Equal(t, len(volumes), len(volumes)) + }) +} + +func FuzzBuildSidecar(f *testing.F) { + + f.Fuzz(func(t *testing.T, data []byte) { + fuzzConsumer := fuzz.NewConsumer(data) + targetStruct := &struct { + chaosDetails types.ChaosDetails + }{} + err := fuzzConsumer.GenerateStruct(targetStruct) + if err != nil { + return + } + containers := BuildSidecar(&targetStruct.chaosDetails) + require.Equal(t, len(containers), len(targetStruct.chaosDetails.SideCar)) + }) +} + +func FuzzContains(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + fuzzConsumer := fuzz.NewConsumer(data) + targetStruct := &struct { + val string + slice []string + }{} + err := fuzzConsumer.GenerateStruct(targetStruct) + if err != nil { + return + } + contains := Contains(targetStruct.val, targetStruct.slice) + for _, s := range targetStruct.slice { + if s == targetStruct.val { + require.True(t, contains) + return + } + } + require.False(t, contains) + }) +} + +func FuzzSubStringExistsInSlice(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + fuzzConsumer := fuzz.NewConsumer(data) + targetStruct := &struct { + val string + slice []string + }{} + err := fuzzConsumer.GenerateStruct(targetStruct) + if err != nil { + return + } + 
contains := SubStringExistsInSlice(targetStruct.val, targetStruct.slice) + for _, s := range targetStruct.slice { + if strings.Contains(s, targetStruct.val) { + require.True(t, contains) + return + } + } + require.False(t, contains) + }) +} + +func FuzzGetRandomSequence(f *testing.F) { + f.Add("random") + + f.Fuzz(func(t *testing.T, sequence string) { + val := GetRandomSequence(sequence) + if strings.ToLower(sequence) == "random" { + require.Contains(t, []string{"serial", "parallel"}, val) + return + } + require.Equal(t, sequence, val) + }) +} diff --git a/pkg/utils/common/nodes.go b/pkg/utils/common/nodes.go index 77951d0f5..5880d643d 100644 --- a/pkg/utils/common/nodes.go +++ b/pkg/utils/common/nodes.go @@ -19,7 +19,7 @@ import ( var err error -//GetNodeList check for the availability of the application node for the chaos execution +// GetNodeList check for the availability of the application node for the chaos execution // if the application node is not defined it will derive the random target node list using node affected percentage func GetNodeList(nodeNames, nodeLabel string, nodeAffPerc int, clients clients.ClientSets) ([]string, error) { @@ -60,7 +60,7 @@ func GetNodeList(nodeNames, nodeLabel string, nodeAffPerc int, clients clients.C return nodeList, nil } -//GetNodeName will select a random replica of application pod and return the node name of that application pod +// GetNodeName will select a random replica of application pod and return the node name of that application pod func GetNodeName(namespace, labels, nodeLabel string, clients clients.ClientSets) (string, error) { switch nodeLabel { diff --git a/pkg/utils/common/pid.go b/pkg/utils/common/pid.go index 8ff32eec1..86e367752 100644 --- a/pkg/utils/common/pid.go +++ b/pkg/utils/common/pid.go @@ -131,7 +131,7 @@ func getCRIOPID(containerID, socketPath, source string) (int, error) { return pid, nil } -//GetPauseAndSandboxPID extract out the PID of the target container +// GetPauseAndSandboxPID extract out the PID of the target container func GetPauseAndSandboxPID(runtime, containerID, socketPath, source string) (int, error) { var pid int diff --git a/pkg/utils/common/pods.go b/pkg/utils/common/pods.go index e00a15964..33882060b 100644 --- a/pkg/utils/common/pods.go +++ b/pkg/utils/common/pods.go @@ -562,22 +562,35 @@ func FilterPodsForNodes(targetPodList core_v1.PodList, containerName string) map for _, pod := range targetPodList.Items { - td := target{ - Name: pod.Name, - Namespace: pod.Namespace, - TargetContainer: containerName, - } + var containerNames []string - if td.TargetContainer == "" { - td.TargetContainer = pod.Spec.Containers[0].Name + switch containerName { + case "ALL": + for _, container := range pod.Spec.Containers { + containerNames = append(containerNames, container.Name) + } + case "": + containerNames = append(containerNames, pod.Spec.Containers[0].Name) + default: + containerNames = append(containerNames, containerName) } - if targets[pod.Spec.NodeName] == nil { - targets[pod.Spec.NodeName] = &TargetsDetails{ - Target: []target{td}, + for _, targetName := range containerNames { + + td := target{ + Name: pod.Name, + Namespace: pod.Namespace, + TargetContainer: targetName, } - } else { - targets[pod.Spec.NodeName].Target = append(targets[pod.Spec.NodeName].Target, td) + + if targets[pod.Spec.NodeName] == nil { + targets[pod.Spec.NodeName] = &TargetsDetails{ + Target: []target{td}, + } + } else { + targets[pod.Spec.NodeName].Target = append(targets[pod.Spec.NodeName].Target, td) + } + } } return targets diff 
--git a/pkg/utils/utils.go b/pkg/utils/utils.go
new file mode 100644
index 000000000..afa7a9560
--- /dev/null
+++ b/pkg/utils/utils.go
@@ -0,0 +1,11 @@
+package utils
+
+import "net/url"
+
+// HttpTimeout reports whether the given error is a *url.Error caused by a timeout.
+func HttpTimeout(err error) bool {
+	if httpErr, ok := err.(*url.Error); ok {
+		return httpErr.Timeout()
+	}
+	return false
+}
diff --git a/pkg/workloads/workloads.go b/pkg/workloads/workloads.go
index 89e5c499e..7ee89adb1 100644
--- a/pkg/workloads/workloads.go
+++ b/pkg/workloads/workloads.go
@@ -73,6 +73,7 @@ func getPodsFromWorkload(target types.AppDetails, allPods *kcorev1.PodList, dyna
 func GetPodOwnerTypeAndName(pod *kcorev1.Pod, dynamicClient dynamic.Interface) (parentType, parentName string, err error) {
	for _, owner := range pod.GetOwnerReferences() {
		parentName = owner.Name
+		parentType = strings.ToLower(owner.Kind)
		if owner.Kind == "StatefulSet" || owner.Kind == "DaemonSet" {
			return strings.ToLower(owner.Kind), parentName, nil
		}
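The two helpers in pkg/telemetry/tracing.go are meant to be used as a pair: GetMarshalledSpanFromContext serialises the active span context into a JSON carrier that can be handed to a helper pod through the TRACE_PARENT environment variable, and GetTraceParentContext rebuilds a context.Context from that variable on the receiving side. The sketch below is illustrative only; the endpoint and span names are placeholders, and it assumes InitOTelSDK from pkg/telemetry/otel.go has registered the global tracer provider and propagator.

package main

import (
	"context"

	"github.com/litmuschaos/litmus-go/pkg/telemetry"
	"go.opentelemetry.io/otel"
)

func main() {
	ctx := context.Background()

	// Register the global tracer provider and propagator; the OTLP endpoint
	// here stands in for whatever OTEL_EXPORTER_OTLP_ENDPOINT resolves to.
	shutdown, err := telemetry.InitOTelSDK(ctx, true, "localhost:4317")
	if err == nil {
		defer shutdown(ctx)
	}

	// Experiment side: start a span and serialise its context so it can be
	// injected into a helper pod as the TRACE_PARENT env variable.
	ctx, span := otel.Tracer(telemetry.TracerName).Start(ctx, "example-chaos-step")
	defer span.End()
	traceParent := telemetry.GetMarshalledSpanFromContext(ctx)
	_ = traceParent // normally placed on the helper pod spec as TRACE_PARENT

	// Helper side: with TRACE_PARENT populated, the helper rebuilds the parent
	// context and its spans join the same trace.
	helperCtx := telemetry.GetTraceParentContext()
	_, helperSpan := otel.Tracer(telemetry.TracerName).Start(helperCtx, "example-helper-step")
	helperSpan.End()
}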
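The stricter validation in RandomInterval (pkg/utils/common/common.go) accepts only a plain number of seconds or a lower-upper range; any other input now fails fast with a parse error instead of being split blindly. A small, purely illustrative driver, with input values chosen arbitrarily:

package main

import (
	"fmt"

	"github.com/litmuschaos/litmus-go/pkg/utils/common"
)

func main() {
	// Inputs matching ^\d+(-\d+)?$ are accepted; the call sleeps for a random
	// interval derived from the bounds and then returns nil.
	fmt.Println(common.RandomInterval("2"))   // nil
	fmt.Println(common.RandomInterval("1-3")) // nil

	// Anything else is rejected up front by the new regexp guard.
	fmt.Println(common.RandomInterval("ten")) // error: could not parse CHAOS_INTERVAL env, bad input
}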