From fe2f9a318d5239c13c7371edb0ccb0b23bb2e673 Mon Sep 17 00:00:00 2001 From: rksharma95 Date: Fri, 28 Jul 2023 13:13:45 +0530 Subject: [PATCH 1/6] add multienforcer controller Signed-off-by: rksharma95 --- KubeArmor/go.mod | 4 +- KubeArmor/go.sum | 8 +- .../helm/KubeArmor/templates/RBAC/roles.yaml | 8 + .../templates/clusterrole-rbac.yaml | 8 + pkg/KubeArmorController/Dockerfile | 3 + .../v1/zz_generated.deepcopy.go | 1 - pkg/KubeArmorController/common/common.go | 127 ++++++++++++ .../controllers/podrefresh_controller.go | 74 ++++++- pkg/KubeArmorController/go.mod | 22 ++- pkg/KubeArmorController/go.sum | 46 +++-- .../handlers/pod_mutation.go | 125 ++---------- .../informer/multienforcer_controller.go | 187 ++++++++++++++++++ .../informer/nodewatcher.go | 154 +++++++++++++++ pkg/KubeArmorController/main.go | 86 +++----- pkg/KubeArmorController/types/types.go | 26 +++ .../config/rbac/clusterrole.yaml | 8 + pkg/KubeArmorOperator/go.mod | 3 +- pkg/KubeArmorOperator/go.sum | 4 +- tests/go.mod | 2 +- tests/go.sum | 4 +- tests/k8s_env/Makefile | 2 +- tests/util/kartutil.go | 2 +- 22 files changed, 688 insertions(+), 216 deletions(-) create mode 100644 pkg/KubeArmorController/common/common.go create mode 100644 pkg/KubeArmorController/informer/multienforcer_controller.go create mode 100644 pkg/KubeArmorController/informer/nodewatcher.go create mode 100644 pkg/KubeArmorController/types/types.go diff --git a/KubeArmor/go.mod b/KubeArmor/go.mod index 812aee351f..583eaa8103 100644 --- a/KubeArmor/go.mod +++ b/KubeArmor/go.mod @@ -44,7 +44,7 @@ require ( k8s.io/api v0.29.0 k8s.io/apimachinery v0.29.0 k8s.io/client-go v0.29.0 - k8s.io/cri-api v0.29.0 + k8s.io/cri-api v0.29.7 k8s.io/klog/v2 v2.120.0 k8s.io/utils v0.0.0-20240310230437-4693a0247e57 sigs.k8s.io/controller-runtime v0.15.3 @@ -65,7 +65,7 @@ require ( github.com/evanphx/json-patch/v5 v5.7.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.20.2 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect diff --git a/KubeArmor/go.sum b/KubeArmor/go.sum index 51534be800..f4d440c004 100644 --- a/KubeArmor/go.sum +++ b/KubeArmor/go.sum @@ -58,8 +58,8 @@ github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7z github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= @@ -376,8 +376,8 @@ k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s= 
k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M= -k8s.io/cri-api v0.29.0 h1:atenAqOltRsFqcCQlFFpDnl/R4aGfOELoNLTDJfd7t8= -k8s.io/cri-api v0.29.0/go.mod h1:Rls2JoVwfC7kW3tndm7267kriuRukQ02qfht0PCRuIc= +k8s.io/cri-api v0.29.7 h1:5X1Fid6oxYsP9/W1NtX0RYUefM2UNwaqfew8z7Pbf/M= +k8s.io/cri-api v0.29.7/go.mod h1:A6pdbjzML2xi9B0Clqn5qt1HJ3Ik12x2j+jv/TkqjRE= k8s.io/klog/v2 v2.120.0 h1:z+q5mfovBj1fKFxiRzsa2DsJLPIVMk/KFL81LMOfK+8= k8s.io/klog/v2 v2.120.0/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240105020646-a37d4de58910 h1:1Rp/XEKP5uxPs6QrsngEHAxBjaAR78iJRiJq5Fi7LSU= diff --git a/deployments/helm/KubeArmor/templates/RBAC/roles.yaml b/deployments/helm/KubeArmor/templates/RBAC/roles.yaml index ff219b5636..dc96cd7017 100644 --- a/deployments/helm/KubeArmor/templates/RBAC/roles.yaml +++ b/deployments/helm/KubeArmor/templates/RBAC/roles.yaml @@ -88,6 +88,14 @@ rules: - list - watch - update +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch - apiGroups: - security.kubearmor.com resources: diff --git a/deployments/helm/KubeArmorOperator/templates/clusterrole-rbac.yaml b/deployments/helm/KubeArmorOperator/templates/clusterrole-rbac.yaml index 8d97e87ea7..2f5042d356 100644 --- a/deployments/helm/KubeArmorOperator/templates/clusterrole-rbac.yaml +++ b/deployments/helm/KubeArmorOperator/templates/clusterrole-rbac.yaml @@ -176,6 +176,14 @@ rules: - list - watch - update +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch - apiGroups: - security.kubearmor.com resources: diff --git a/pkg/KubeArmorController/Dockerfile b/pkg/KubeArmorController/Dockerfile index 392d680cf9..51a545cebc 100644 --- a/pkg/KubeArmorController/Dockerfile +++ b/pkg/KubeArmorController/Dockerfile @@ -17,6 +17,9 @@ COPY main.go main.go COPY api/ api/ COPY controllers/ controllers/ COPY handlers/ handlers/ +COPY informer/ informer/ +COPY types/ types/ +COPY common/ common/ # Build RUN CGO_ENABLED=0 GO111MODULE=on go build -a -o manager main.go diff --git a/pkg/KubeArmorController/api/security.kubearmor.com/v1/zz_generated.deepcopy.go b/pkg/KubeArmorController/api/security.kubearmor.com/v1/zz_generated.deepcopy.go index c0fc44008c..414ab4828b 100644 --- a/pkg/KubeArmorController/api/security.kubearmor.com/v1/zz_generated.deepcopy.go +++ b/pkg/KubeArmorController/api/security.kubearmor.com/v1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated // SPDX-License-Identifier: Apache-2.0 // Copyright 2022 Authors of KubeArmor diff --git a/pkg/KubeArmorController/common/common.go b/pkg/KubeArmorController/common/common.go new file mode 100644 index 0000000000..ff0e4c13b4 --- /dev/null +++ b/pkg/KubeArmorController/common/common.go @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2022 Authors of KubeArmor + +package common + +import ( + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" +) + +const k8sVisibility = "process,file,network,capabilities" +const appArmorAnnotation = "container.apparmor.security.beta.kubernetes.io/" +const KubeArmorRestartedAnnotation = "kubearmor.io/restarted" +const KubeArmorForceAppArmorAnnotation = "kubearmor.io/force-apparmor" + +// == Add AppArmor annotations == // +func AppArmorAnnotator(pod *corev1.Pod) { + podAnnotations := map[string]string{} + var podOwnerName string + + // podOwnerName is the pod name for static pods and parent object's name + // in other cases + for _, ownerRef := range 
pod.ObjectMeta.OwnerReferences {
+		// pod is owned by a replicaset, daemonset etc thus we use the managing
+		// controller's name
+		if ownerRef.Controller != nil && *ownerRef.Controller {
+			podOwnerName = ownerRef.Name
+
+			if ownerRef.Kind == "ReplicaSet" {
+				// if it belongs to a replicaset, we also remove the pod template hash
+				podOwnerName = strings.TrimSuffix(podOwnerName, fmt.Sprintf("-%s", pod.ObjectMeta.Labels["pod-template-hash"]))
+			}
+		}
+	}
+
+	if podOwnerName == "" {
+		// pod is standalone, name remains constant
+		podOwnerName = pod.ObjectMeta.Name
+	}
+
+	// Get existing kubearmor annotations
+	for k, v := range pod.Annotations {
+		if strings.HasPrefix(k, appArmorAnnotation) {
+			if v == "unconfined" {
+				containerName := strings.Split(k, "/")[1]
+				podAnnotations[containerName] = v
+			} else {
+				containerName := strings.Split(k, "/")[1]
+				podAnnotations[containerName] = strings.Split(v, "/")[1]
+			}
+		}
+	}
+
+	// Get the remaining containers / not addressed explicitly in the annotation
+	for _, container := range pod.Spec.Containers {
+		if _, ok := podAnnotations[container.Name]; !ok {
+			podAnnotations[container.Name] = "kubearmor-" + pod.Namespace + "-" + podOwnerName + "-" + container.Name
+		}
+	}
+	// Add kubearmor annotations to the pod
+	for k, v := range podAnnotations {
+		if v == "unconfined" {
+			continue
+		}
+		pod.Annotations[appArmorAnnotation+k] = "localhost/" + v
+	}
+}
+func AddCommonAnnotations(pod *corev1.Pod) {
+	if pod.Annotations == nil {
+		pod.Annotations = map[string]string{}
+	}
+
+	// == Policy == //
+
+	if _, ok := pod.Annotations["kubearmor-policy"]; !ok {
+		// if no annotation is set enable kubearmor by default
+		pod.Annotations["kubearmor-policy"] = "enabled"
+	} else if pod.Annotations["kubearmor-policy"] != "enabled" && pod.Annotations["kubearmor-policy"] != "disabled" && pod.Annotations["kubearmor-policy"] != "audited" {
+		// if kubearmor policy is not set correctly, default it to enabled
+		pod.Annotations["kubearmor-policy"] = "enabled"
+	}
+	// == Exception == //
+
+	// exception: kubernetes app
+	if pod.Namespace == "kube-system" {
+		if _, ok := pod.Labels["k8s-app"]; ok {
+			pod.Annotations["kubearmor-policy"] = "audited"
+		}
+
+		if value, ok := pod.Labels["component"]; ok {
+			if value == "etcd" || value == "kube-apiserver" || value == "kube-controller-manager" || value == "kube-scheduler" || value == "kube-proxy" {
+				pod.Annotations["kubearmor-policy"] = "audited"
+			}
+		}
+	}
+
+	// exception: cilium-operator
+	if _, ok := pod.Labels["io.cilium/app"]; ok {
+		pod.Annotations["kubearmor-policy"] = "audited"
+	}
+
+	// exception: kubearmor
+	if _, ok := pod.Labels["kubearmor-app"]; ok {
+		pod.Annotations["kubearmor-policy"] = "audited"
+	}
+
+	// == Visibility == //
+
+	if _, ok := pod.Annotations["kubearmor-visibility"]; !ok {
+		pod.Annotations["kubearmor-visibility"] = k8sVisibility
+	}
+}
+
+func RemoveApparmorAnnotation(pod *corev1.Pod) {
+	annotations := []string{}
+
+	for key := range pod.Annotations {
+		if strings.HasPrefix(key, "container.apparmor.security.beta.kubernetes.io/") {
+			annotations = append(annotations, key)
+		}
+	}
+	for _, key := range annotations {
+		delete(pod.Annotations, key)
+	}
+}
diff --git a/pkg/KubeArmorController/controllers/podrefresh_controller.go b/pkg/KubeArmorController/controllers/podrefresh_controller.go
index c811d7d92f..0009fc05a2 100644
--- a/pkg/KubeArmorController/controllers/podrefresh_controller.go
+++ b/pkg/KubeArmorController/controllers/podrefresh_controller.go
@@ -5,10 +5,14 @@ package controllers
 import (
 	"context"
-	"strings"
+	"fmt"
 	"time"
 
+	"github.com/kubearmor/KubeArmor/pkg/KubeArmorController/common"
+	"github.com/kubearmor/KubeArmor/pkg/KubeArmorController/informer"
+	"github.com/kubearmor/KubeArmor/pkg/KubeArmorController/types"
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -17,7 +21,8 @@ import (
 
 type PodRefresherReconciler struct {
 	client.Client
-	Scheme *runtime.Scheme
+	Scheme  *runtime.Scheme
+	Cluster *types.Cluster
 }
 
 // +kubebuilder:rbac:groups="",resources=pods,verbs=get;watch;list;create;update;delete
@@ -32,20 +37,57 @@ func (r *PodRefresherReconciler) Reconcile(ctx context.Context, req ctrl.Request
 	log.Info("Watching for blocked pods")
 	poddeleted := false
 	for _, pod := range podList.Items {
-		if strings.Contains(pod.Status.Message, "Cannot enforce AppArmor") {
+		if pod.DeletionTimestamp != nil {
+			continue
+		}
+		if pod.Spec.NodeName == "" {
+			continue
+		}
+		r.Cluster.ClusterLock.RLock()
+		enforcer := ""
+		if _, ok := r.Cluster.Nodes[pod.Spec.NodeName]; ok {
+			enforcer = "apparmor"
+		} else {
+			enforcer = "bpf"
+		}
+
+		r.Cluster.ClusterLock.RUnlock()
+
+		if _, ok := pod.Annotations["kubearmor-policy"]; !ok {
+			originalPod := pod.DeepCopy()
+			common.AddCommonAnnotations(&pod)
+			patch := client.MergeFrom(originalPod)
+			err := r.Patch(ctx, &pod, patch)
+			if err != nil {
+				if !errors.IsNotFound(err) {
+					log.Info(fmt.Sprintf("Failed to patch pod annotations: %s", err.Error()))
+				}
+			}
+		}
+
+		// restart not required for special pods and already annotated pods
+
+		restartPod := requireRestart(pod, enforcer)
+
+		if restartPod {
+
+			// for annotating pre-existing pods on apparmor-nodes
 			// the pod is managed by a controller (e.g: replicaset)
 			if pod.OwnerReferences != nil && len(pod.OwnerReferences) != 0 {
 				log.Info("Deleting pod " + pod.Name + "in namespace " + pod.Namespace + " as it is managed")
 				if err := r.Delete(ctx, &pod); err != nil {
-					log.Error(err, "Could not delete pod "+pod.Name+" in namespace "+pod.Namespace)
+					if !errors.IsNotFound(err) {
+						log.Error(err, "Could not delete pod "+pod.Name+" in namespace "+pod.Namespace)
+					}
 				}
 			} else {
 				// single pods
 				// mimic kubectl replace --force
 				// delete the pod --force ==> grace period equals zero
-				log.Info("deleting single pod " + pod.Name + " in namespace " + pod.Namespace)
+				log.Info("Deleting single pod " + pod.Name + " in namespace " + pod.Namespace)
 				if err := r.Delete(ctx, &pod, client.GracePeriodSeconds(0)); err != nil {
-					log.Error(err, "Could'nt delete pod "+pod.Name+" in namespace "+pod.Namespace)
+					if !errors.IsNotFound(err) {
+						log.Error(err, "Couldn't delete pod "+pod.Name+" in namespace "+pod.Namespace)
+					}
 				}
 
 				// clean the pre-polutated attributes
@@ -72,3 +114,23 @@ func (r *PodRefresherReconciler) SetupWithManager(mgr ctrl.Manager) error {
 		For(&corev1.Pod{}).
Complete(r) } +func requireRestart(pod corev1.Pod, enforcer string) bool { + + if pod.Namespace == "kube-system" { + return false + } + if _, ok := pod.Labels["io.cilium/app"]; ok { + return false + } + + if _, ok := pod.Labels["kubearmor-app"]; ok { + return false + } + + // !hasApparmorAnnotations && enforcer == "apparmor" + if informer.HandleAppArmor(pod.Annotations) && enforcer == "apparmor" { + return true + } + + return false +} diff --git a/pkg/KubeArmorController/go.mod b/pkg/KubeArmorController/go.mod index a11b8b0ba9..20c689a22e 100644 --- a/pkg/KubeArmorController/go.mod +++ b/pkg/KubeArmorController/go.mod @@ -5,13 +5,14 @@ go 1.21.0 toolchain go1.21.12 require ( - github.com/go-logr/logr v1.4.1 + github.com/go-logr/logr v1.4.2 github.com/onsi/ginkgo/v2 v2.13.0 github.com/onsi/gomega v1.30.0 k8s.io/api v0.29.0 k8s.io/apiextensions-apiserver v0.29.0 k8s.io/apimachinery v0.29.0 k8s.io/client-go v0.29.0 + k8s.io/cri-api v0.29.7 sigs.k8s.io/controller-runtime v0.15.3 sigs.k8s.io/yaml v1.4.0 ) @@ -31,12 +32,12 @@ require ( github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect - github.com/google/uuid v1.5.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -51,18 +52,21 @@ require ( github.com/prometheus/common v0.45.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/testify v1.9.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect - golang.org/x/net v0.23.0 // indirect + golang.org/x/net v0.26.0 // indirect golang.org/x/oauth2 v0.16.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/term v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/term v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.16.1 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/grpc v1.58.3 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/KubeArmorController/go.sum b/pkg/KubeArmorController/go.sum index 772997f9c2..2ea6eee87b 100644 --- a/pkg/KubeArmorController/go.sum +++ b/pkg/KubeArmorController/go.sum @@ -16,8 +16,8 @@ github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0n github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod 
h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= @@ -34,8 +34,8 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU= github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -47,8 +47,8 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= @@ -96,8 +96,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= @@ -120,8 +120,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -136,18 +136,18 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -155,8 +155,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= -golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -165,10 +165,14 @@ gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -190,6 +194,8 @@ k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= k8s.io/client-go v0.29.0/go.mod h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s= k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M= +k8s.io/cri-api v0.29.7 h1:5X1Fid6oxYsP9/W1NtX0RYUefM2UNwaqfew8z7Pbf/M= +k8s.io/cri-api v0.29.7/go.mod h1:A6pdbjzML2xi9B0Clqn5qt1HJ3Ik12x2j+jv/TkqjRE= k8s.io/klog/v2 v2.120.0 h1:z+q5mfovBj1fKFxiRzsa2DsJLPIVMk/KFL81LMOfK+8= k8s.io/klog/v2 v2.120.0/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240105020646-a37d4de58910 h1:1Rp/XEKP5uxPs6QrsngEHAxBjaAR78iJRiJq5Fi7LSU= diff --git a/pkg/KubeArmorController/handlers/pod_mutation.go b/pkg/KubeArmorController/handlers/pod_mutation.go index 563a50f63e..17b911817d 100644 --- a/pkg/KubeArmorController/handlers/pod_mutation.go +++ b/pkg/KubeArmorController/handlers/pod_mutation.go @@ -6,11 
+6,11 @@ package handlers import ( "context" "encoding/json" - "fmt" "net/http" - "strings" "github.com/go-logr/logr" + "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/common" + "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/types" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -18,15 +18,12 @@ import ( // PodAnnotator Structure type PodAnnotator struct { - Client client.Client - Decoder *admission.Decoder - Logger logr.Logger - Enforcer string + Client client.Client + Decoder *admission.Decoder + Logger logr.Logger + Cluster *types.Cluster } -const k8sVisibility = "process,file,network,capabilities" -const appArmorAnnotation = "container.apparmor.security.beta.kubernetes.io/" - // +kubebuilder:webhook:path=/mutate-pods,mutating=true,failurePolicy=Ignore,groups="",resources=pods,verbs=create;update,versions=v1,name=annotation.kubearmor.com,admissionReviewVersions=v1,sideEffects=NoneOnDryRun // Handle Pod Annotation @@ -42,59 +39,19 @@ func (a *PodAnnotator) Handle(ctx context.Context, req admission.Request) admiss pod.Namespace = req.Namespace } - if pod.Annotations == nil { - pod.Annotations = map[string]string{} - } - - // == Policy == // - - if _, ok := pod.Annotations["kubearmor-policy"]; !ok { - // if no annotation is set enable kubearmor by default - pod.Annotations["kubearmor-policy"] = "enabled" - } else if pod.Annotations["kubearmor-policy"] != "enabled" && pod.Annotations["kubearmor-policy"] != "disabled" && pod.Annotations["kubearmor-policy"] != "audited" { - // if kubearmor policy is not set correctly, default it to enabled - pod.Annotations["kubearmor-policy"] = "enabled" - } - - // == LSM == // - - if a.Enforcer == "AppArmor" { - appArmorAnnotator(pod) - } - - // == Exception == // - - // exception: kubernetes app - if pod.Namespace == "kube-system" { - if _, ok := pod.Labels["k8s-app"]; ok { - pod.Annotations["kubearmor-policy"] = "audited" - } - - if value, ok := pod.Labels["component"]; ok { - if value == "etcd" || value == "kube-apiserver" || value == "kube-controller-manager" || value == "kube-scheduler" { - pod.Annotations["kubearmor-policy"] = "audited" - } - } - } - - // exception: cilium-operator - if _, ok := pod.Labels["io.cilium/app"]; ok { - pod.Annotations["kubearmor-policy"] = "audited" - } - - // exception: kubearmor - if _, ok := pod.Labels["kubearmor-app"]; ok { - pod.Annotations["kubearmor-policy"] = "audited" - } + // == common annotations == // + common.AddCommonAnnotations(pod) - // == Visibility == // + // == Apparmor annotations == // + a.Cluster.ClusterLock.RLock() + homogenousApparmor := a.Cluster.HomogenousApparmor + a.Cluster.ClusterLock.RUnlock() - if _, ok := pod.Annotations["kubearmor-visibility"]; !ok { - pod.Annotations["kubearmor-visibility"] = k8sVisibility + if homogenousApparmor { + common.AppArmorAnnotator(pod) } // == // - // send the mutation response marshaledPod, err := json.Marshal(pod) if err != nil { @@ -102,57 +59,3 @@ func (a *PodAnnotator) Handle(ctx context.Context, req admission.Request) admiss } return admission.PatchResponseFromRaw(req.Object.Raw, marshaledPod) } - -// == Add AppArmor annotations == // -func appArmorAnnotator(pod *corev1.Pod) { - podAnnotations := map[string]string{} - var podOwnerName string - - // podOwnerName is the pod name for static pods and parent object's name - // in other cases - for _, ownerRef := range pod.ObjectMeta.OwnerReferences { - // pod is owned by a replicaset, daemonset etc thus we use the 
managing - // controller's name - if *ownerRef.Controller { - podOwnerName = ownerRef.Name - - if ownerRef.Kind == "ReplicaSet" { - // if it belongs to a replicaset, we also remove the pod template hash - podOwnerName = strings.TrimSuffix(podOwnerName, fmt.Sprintf("-%s", pod.ObjectMeta.Labels["pod-template-hash"])) - } - } - } - - if podOwnerName == "" { - // pod is standalone, name remains constant - podOwnerName = pod.ObjectMeta.Name - } - - // Get existant kubearmor annotations - for k, v := range pod.Annotations { - if strings.HasPrefix(k, appArmorAnnotation) { - if v == "unconfined" { - containerName := strings.Split(k, "/")[1] - podAnnotations[containerName] = v - } else { - containerName := strings.Split(k, "/")[1] - podAnnotations[containerName] = strings.Split(v, "/")[1] - } - } - } - - // Get the remaining containers / not addressed explecitly in the annotation - for _, container := range pod.Spec.Containers { - if _, ok := podAnnotations[container.Name]; !ok { - podAnnotations[container.Name] = "kubearmor-" + pod.Namespace + "-" + podOwnerName + "-" + container.Name - } - } - - // Add kubearmor annotations to the pod - for k, v := range podAnnotations { - if v == "unconfined" { - continue - } - pod.Annotations[appArmorAnnotation+k] = "localhost/" + v - } -} diff --git a/pkg/KubeArmorController/informer/multienforcer_controller.go b/pkg/KubeArmorController/informer/multienforcer_controller.go new file mode 100644 index 0000000000..b9171b51e3 --- /dev/null +++ b/pkg/KubeArmorController/informer/multienforcer_controller.go @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2022 Authors of KubeArmor + +package informer + +import ( + "context" + "fmt" + "strings" + + "github.com/go-logr/logr" + "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/common" + "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/types" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +func hasApparmorAnnotation(annotations map[string]string) bool { + for key := range annotations { + if strings.HasPrefix(key, "container.apparmor.security.beta.kubernetes.io/") { + return true + } + } + return false +} + +func restartPod(c *kubernetes.Clientset, pod *corev1.Pod, apparmor bool, log *logr.Logger) { + name := pod.Name + pod.ResourceVersion = "" + pod.UID = "" + if pod.DeletionTimestamp != nil { + // pod is being deleted + return + } + if pod.Annotations == nil { + pod.Annotations = make(map[string]string) + } + + if pod.OwnerReferences != nil && len(pod.OwnerReferences) != 0 { + + pod.Name = "" + log.Info(fmt.Sprintf("Restarting pod %s", name)) + err := c.CoreV1().Pods(pod.Namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) + if err != nil { + log.Info(fmt.Sprintf("Error while deleting pod %s, error=%s", name, err.Error())) + return + } + + } else { + // Delete static pods + log.Info(fmt.Sprintf("Restarting static pod %s", name)) + err := c.CoreV1().Pods(pod.Namespace).Delete(context.Background(), name, metav1.DeleteOptions{GracePeriodSeconds: new(int64)}) + if err != nil { + log.Info(fmt.Sprintf("Error while deleting static pod %s, error=%s", name, err.Error())) + return + } + + } + /* + annotating pods with apparmor annotations beforehand this is + done due to annotating with mutating webhook can cause a endless loop + */ + if apparmor { + common.AppArmorAnnotator(pod) + } + _, err := 
c.CoreV1().Pods(pod.Namespace).Create(context.Background(), pod, metav1.CreateOptions{}) + if err != nil { + log.Info(fmt.Sprintf("Error while restarting pod %s, error=%s", name, err.Error())) + return + } + + log.Info(fmt.Sprintf("Pod %s has been restarted", name)) +} + +func HandleAppArmor(annotations map[string]string) bool { + return !hasApparmorAnnotation(annotations) +} + +func HandleBPF(annotations map[string]string) bool { + return hasApparmorAnnotation(annotations) +} + +func IsAppArmorExempt(labels map[string]string, namespace string) bool { + + // exception: kubernetes app + if namespace == "kube-system" { + if _, ok := labels["k8s-app"]; ok { + return true + } + + if value, ok := labels["component"]; ok { + if value == "etcd" || value == "kube-apiserver" || value == "kube-controller-manager" || value == "kube-scheduler" || value == "kube-proxy" { + return true + } + } + } + + // exception: cilium-operator + if _, ok := labels["io.cilium/app"]; ok { + return true + } + + // exception: kubearmor + if _, ok := labels["kubearmor-app"]; ok { + return true + } + return false +} + +func handlePod(c *kubernetes.Clientset, pod *corev1.Pod, enforcer string, log *logr.Logger) { + if pod.DeletionTimestamp != nil { + // pod is being deleted + return + } + switch enforcer { + case "apparmor": + + if HandleAppArmor(pod.Annotations) && !IsAppArmorExempt(pod.Labels, pod.Namespace) { + restartPod(c, pod, true, log) + } + return + case "bpf": + if HandleBPF(pod.Annotations) { + + common.RemoveApparmorAnnotation(pod) + if !IsAppArmorExempt(pod.Labels, pod.Namespace) { + restartPod(c, pod, false, log) + } + } + default: + log.Info(fmt.Sprintf("Leaving pod %s as it is, could not determine the enforcer", pod.Name)) + } +} + +func PodWatcher(c *kubernetes.Clientset, cluster *types.Cluster, log logr.Logger) { + log.Info("Starting pod watcher") + + fact := informers.NewSharedInformerFactory(c, 0) + inf := fact.Core().V1().Pods().Informer() + + inf.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + cluster.ClusterLock.RLock() + defer cluster.ClusterLock.RUnlock() + if cluster.HomogeneousStatus { + return + } + if pod, ok := obj.(*corev1.Pod); ok { + if pod.Spec.NodeName != "" { + nodeEnforcer := "" + if _, ok := cluster.Nodes[pod.Spec.NodeName]; ok { + nodeEnforcer = "apparmor" + } else { + nodeEnforcer = "bpf" + } + log.Info(fmt.Sprintf("New pod was added, name=%s enforcer=%s", pod.Name, nodeEnforcer)) + handlePod(c, pod, nodeEnforcer, &log) + } + } + }, + UpdateFunc: func(oldObj, newObj interface{}) { + cluster.ClusterLock.RLock() + defer cluster.ClusterLock.RUnlock() + if cluster.HomogeneousStatus { + return + } + if pod, ok := newObj.(*corev1.Pod); ok { + if pod.Spec.NodeName != "" { + nodeEnforcer := "" + if _, ok := cluster.Nodes[pod.Spec.NodeName]; ok { + nodeEnforcer = "apparmor" + } else { + nodeEnforcer = "bpf" + } + log.Info(fmt.Sprintf("pod was updated, name=%s enforcer=%s", pod.Name, nodeEnforcer)) + handlePod(c, pod, nodeEnforcer, &log) + } + } + }, + }) + + inf.Run(wait.NeverStop) +} diff --git a/pkg/KubeArmorController/informer/nodewatcher.go b/pkg/KubeArmorController/informer/nodewatcher.go new file mode 100644 index 0000000000..bfca7eb034 --- /dev/null +++ b/pkg/KubeArmorController/informer/nodewatcher.go @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2022 Authors of KubeArmor + +package informer + +import ( + "fmt" + "sync" + + "github.com/go-logr/logr" + "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/types" + corev1 
"k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +func InitCluster() types.Cluster { + return types.Cluster{ + Nodes: make(map[string]string), + HomogeneousStatus: true, + ClusterLock: &sync.RWMutex{}, + HomogenousApparmor: false, + TotalNodes: 0, + } +} + +func NodeWatcher(c *kubernetes.Clientset, cluster *types.Cluster, log logr.Logger) { + log.Info("Starting node watcher") + + fact := informers.NewSharedInformerFactory(c, 0) + inf := fact.Core().V1().Nodes().Informer() + + inf.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + if node, ok := obj.(*corev1.Node); ok { + if node.Labels != nil { + if enforcer, ok := node.Labels["kubearmor.io/enforcer"]; ok { + + log.Info(fmt.Sprintf("New node was added, name=%s enforcer=%s", node.Name, enforcer)) + cluster.ClusterLock.Lock() + defer cluster.ClusterLock.Unlock() + cluster.TotalNodes++ + + if enforcer == "apparmor" { + cluster.Nodes[node.Name] = enforcer + } + // re-compute homogeneous status + homogeneous := true + homogeneousApparmor := false + + if len(cluster.Nodes) > 0 && cluster.TotalNodes != len(cluster.Nodes) { + homogeneous = false + log.Info(fmt.Sprintf("Cluster in a non homogeneus state with %d nodes", cluster.TotalNodes)) + } + cluster.HomogeneousStatus = homogeneous + + if homogeneous { + if enforcer == "apparmor" { + homogeneousApparmor = true + log.Info("Cluster in a homogeneus state with apparmor enforcer") + } else { + log.Info("Cluster in a homogeneus state") + } + } + cluster.HomogenousApparmor = homogeneousApparmor + } + } + } + }, + UpdateFunc: func(oldObj, newObj interface{}) { + if node, ok := newObj.(*corev1.Node); ok { + if node.Labels == nil { + return + } + cluster.ClusterLock.Lock() + defer cluster.ClusterLock.Unlock() + if _, ok := cluster.Nodes[node.Name]; !ok { + return + } + + if enforcer, ok := node.Labels["kubearmor.io/enforcer"]; ok { + if _, ok := cluster.Nodes[node.Name]; ok { + // in case the enforcer has been updated to bpflsm from apparmor + if enforcer != cluster.Nodes[node.Name] { + delete(cluster.Nodes, node.Name) + } + } else { + if enforcer == "apparmor" { + cluster.Nodes[node.Name] = enforcer + } + } + // re-compute homogeneous status + homogeneous := true + homogeneousApparmor := false + + if len(cluster.Nodes) > 0 && cluster.TotalNodes != len(cluster.Nodes) { + homogeneous = false + log.Info(fmt.Sprintf("Cluster in a non homogeneus state with %d nodes", cluster.TotalNodes)) + } + + cluster.HomogeneousStatus = homogeneous + if homogeneous { + if enforcer == "apparmor" { + homogeneousApparmor = true + log.Info("Cluster in a homogeneus state with apparmor enforcer") + } else { + log.Info("Cluster in a homogeneus state") + } + } + cluster.HomogenousApparmor = homogeneousApparmor + } + } + }, + DeleteFunc: func(obj interface{}) { + if node, ok := obj.(*corev1.Node); ok { + cluster.ClusterLock.Lock() + defer cluster.ClusterLock.Unlock() + + cluster.TotalNodes-- + + delete(cluster.Nodes, node.Name) + + if len(cluster.Nodes) == 0 && cluster.TotalNodes == 0 { + return + } + // re-compute homogeneous status + homogeneous := true + homogeneousApparmor := false + + if len(cluster.Nodes) > 0 && cluster.TotalNodes != len(cluster.Nodes) { + homogeneous = false + log.Info(fmt.Sprintf("Cluster in a non homogeneus state with %d nodes", cluster.TotalNodes)) + } + cluster.HomogeneousStatus = homogeneous + if homogeneous { + if cluster.TotalNodes == len(cluster.Nodes) { + 
homogeneousApparmor = true + log.Info("Cluster in a homogeneus state with apparmor enforcer") + } else { + log.Info("Cluster in a homogeneus state") + } + } + cluster.HomogenousApparmor = homogeneousApparmor + + } + }, + }) + + inf.Run(wait.NeverStop) +} + +// add fall back logix for pod annotataions diff --git a/pkg/KubeArmorController/main.go b/pkg/KubeArmorController/main.go index 34afefb761..39b65a7762 100644 --- a/pkg/KubeArmorController/main.go +++ b/pkg/KubeArmorController/main.go @@ -6,12 +6,10 @@ package main import ( "flag" "os" - "path/filepath" - "strings" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. - + "k8s.io/client-go/kubernetes" _ "k8s.io/client-go/plugin/pkg/client/auth" "k8s.io/apimachinery/pkg/runtime" @@ -23,10 +21,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "github.com/go-logr/logr" securityv1 "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/api/security.kubearmor.com/v1" "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/controllers" "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/handlers" + "github.com/kubearmor/KubeArmor/pkg/KubeArmorController/informer" //+kubebuilder:scaffold:imports ) @@ -92,25 +90,6 @@ func main() { os.Exit(1) } - setupLog.Info("Adding mutation webhook") - mgr.GetWebhookServer().Register("/mutate-pods", &webhook.Admission{ - Handler: &handlers.PodAnnotator{ - Client: mgr.GetClient(), - Logger: setupLog, - Enforcer: detectEnforcer(setupLog), - Decoder: admission.NewDecoder(mgr.GetScheme()), - }, - }) - - setupLog.Info("Adding pod refresher controller") - if err = (&controllers.PodRefresherReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Pod") - os.Exit(1) - } - setupLog.Info("Adding KubeArmor Host policy controller") if err = (&controllers.KubeArmorHostPolicyReconciler{ Client: mgr.GetClient(), @@ -130,43 +109,42 @@ func main() { os.Exit(1) } - //+kubebuilder:scaffold:builder - - setupLog.Info("starting manager") - if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { - setupLog.Error(err, "problem running manager") + client, err := kubernetes.NewForConfig(mgr.GetConfig()) + if err != nil { + setupLog.Error(err, "Cannot init kuberntes client") os.Exit(1) } -} -// detect the enforcer on the node -func detectEnforcer(logger logr.Logger) string { - // assumption: all nodes have the same OSes + cluster := informer.InitCluster() + setupLog.Info("Starting node watcher") + go informer.NodeWatcher(client, &cluster, ctrl.Log.WithName("informer").WithName("NodeWatcher")) + setupLog.Info("Starting pod watcher") + go informer.PodWatcher(client, &cluster, ctrl.Log.WithName("informer").WithName("PodWatcher")) - lsm := []byte{} - lsmPath := "/sys/kernel/security/lsm" + setupLog.Info("Adding mutation webhook") + mgr.GetWebhookServer().Register("/mutate-pods", &webhook.Admission{ + Handler: &handlers.PodAnnotator{ + Client: mgr.GetClient(), + Logger: setupLog, + Decoder: admission.NewDecoder(mgr.GetScheme()), + Cluster: &cluster, + }, + }) - if _, err := os.Stat(filepath.Clean(lsmPath)); err == nil { - lsm, err = os.ReadFile(lsmPath) - if err != nil { - logger.Info("Failed to read /sys/kernel/security/lsm " + err.Error()) - return "" - } + setupLog.Info("Adding pod refresher controller") + if err = (&controllers.PodRefresherReconciler{ + Client: 
mgr.GetClient(), + Scheme: mgr.GetScheme(), + Cluster: &cluster, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Pod") + os.Exit(1) } + //+kubebuilder:scaffold:builder - enforcer := string(lsm) - - if strings.Contains(enforcer, "bpf") { - logger.Info("Detected BPFLSM as the cluster Enforcer") - return "BPFLSM" - } else if strings.Contains(enforcer, "apparmor") { - logger.Info("Detected AppArmor as the cluster Enforcer") - return "AppArmor" - } else if strings.Contains(enforcer, "selinux") { - logger.Info("Detected SELinux as the cluster Enforcer") - return "SELinux" + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + os.Exit(1) } - - logger.Info("No enforcer was detected") - return "" } diff --git a/pkg/KubeArmorController/types/types.go b/pkg/KubeArmorController/types/types.go new file mode 100644 index 0000000000..cf6fd5c7a4 --- /dev/null +++ b/pkg/KubeArmorController/types/types.go @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2022 Authors of KubeArmor +package types + +import ( + "sync" + + "github.com/go-logr/logr" + "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" +) + +type Cluster struct { + Nodes map[string]string + HomogeneousStatus bool // the cluster runs the same enforcer + HomogenousApparmor bool // the cluster runs with apparmor enforcer + ClusterLock *sync.RWMutex + TotalNodes int //total no of nodes present +} + +type MultiEnforcerController struct { + Client kubernetes.Clientset + Log logr.Logger + Cluster Cluster + PodLister v1.PodLister +} diff --git a/pkg/KubeArmorOperator/config/rbac/clusterrole.yaml b/pkg/KubeArmorOperator/config/rbac/clusterrole.yaml index 7d91fe7466..3e13102ac5 100644 --- a/pkg/KubeArmorOperator/config/rbac/clusterrole.yaml +++ b/pkg/KubeArmorOperator/config/rbac/clusterrole.yaml @@ -170,6 +170,14 @@ rules: - list - watch - update +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch - apiGroups: - security.kubearmor.com resources: diff --git a/pkg/KubeArmorOperator/go.mod b/pkg/KubeArmorOperator/go.mod index 9d1e513fe8..73f40ccce3 100644 --- a/pkg/KubeArmorOperator/go.mod +++ b/pkg/KubeArmorOperator/go.mod @@ -31,7 +31,7 @@ require ( github.com/emicklei/go-restful/v3 v3.11.2 // indirect github.com/evanphx/json-patch v5.7.0+incompatible // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.20.2 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect github.com/go-openapi/swag v0.22.9 // indirect @@ -63,7 +63,6 @@ require ( github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/viper v1.18.2 // indirect - github.com/stretchr/testify v1.9.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect diff --git a/pkg/KubeArmorOperator/go.sum b/pkg/KubeArmorOperator/go.sum index 4498622320..e34d49cfac 100644 --- a/pkg/KubeArmorOperator/go.sum +++ b/pkg/KubeArmorOperator/go.sum @@ -13,8 +13,8 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 
h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= diff --git a/tests/go.mod b/tests/go.mod index 3a78581203..2f93de57f1 100644 --- a/tests/go.mod +++ b/tests/go.mod @@ -5,7 +5,7 @@ go 1.21.0 toolchain go1.21.12 replace ( - github.com/cilium/cilium => github.com/cilium/cilium v1.14.8 + github.com/cilium/cilium => github.com/cilium/cilium v1.14.12 github.com/cilium/proxy => github.com/cilium/proxy v0.0.0-20231218064853-ea8cba5b690b github.com/kubearmor/KubeArmor/pkg/KubeArmorController => ../pkg/KubeArmorController github.com/kubearmor/KubeArmor/protobuf => ../protobuf diff --git a/tests/go.sum b/tests/go.sum index fb44fe4425..f70f29ae8f 100644 --- a/tests/go.sum +++ b/tests/go.sum @@ -18,8 +18,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cilium/checkmate v1.0.3 h1:CQC5eOmlAZeEjPrVZY3ZwEBH64lHlx9mXYdUehEwI5w= github.com/cilium/checkmate v1.0.3/go.mod h1:KiBTasf39/F2hf2yAmHw21YFl3hcEyP4Yk6filxc12A= -github.com/cilium/cilium v1.14.8 h1:Jm54iA7XxWJn+GdZrfzcEyeoNUb58w8y0g8ZB2lVcJw= -github.com/cilium/cilium v1.14.8/go.mod h1:7cWSSEl+dU9Q5CqnEWT//c0w8yLSazShw287Uop6xVs= +github.com/cilium/cilium v1.14.12 h1:lQ4XilTUJK5R6BrZnSm4pYxj6jsBQFWlBuRHA5FHJ1I= +github.com/cilium/cilium v1.14.12/go.mod h1:Pjy+qd1hrrXulp78Hs76ahKCttij64LvjxFui9XquVA= github.com/cilium/ebpf v0.13.2 h1:uhLimLX+jF9BTPPvoCUYh/mBeoONkjgaJ9w9fn0mRj4= github.com/cilium/ebpf v0.13.2/go.mod h1:DHp1WyrLeiBh19Cf/tfiSMhqheEiK8fXFZ4No0P1Hso= github.com/cilium/proxy v0.0.0-20231218064853-ea8cba5b690b h1:BmpBAY7GUsb86lNpPjVJAB/S00ER9kSCtyleqCO3M3U= diff --git a/tests/k8s_env/Makefile b/tests/k8s_env/Makefile index ef96bc28e4..8e8a090053 100644 --- a/tests/k8s_env/Makefile +++ b/tests/k8s_env/Makefile @@ -6,7 +6,7 @@ build: @go mod tidy # run in two steps as syscall suite fails if run at the very end # see - https://github.com/kubearmor/KubeArmor/issues/1269 - @ginkgo --vv --flake-attempts=10 --timeout=10m syscalls/ + @ginkgo --vv --flake-attempts=10 --timeout=15m syscalls/ @ginkgo -r --vv --flake-attempts=10 --timeout=30m --skip-package "syscalls" .PHONY: test test: diff --git a/tests/util/kartutil.go b/tests/util/kartutil.go index 5c3c1d599a..345fef4646 100644 --- a/tests/util/kartutil.go +++ b/tests/util/kartutil.go @@ -266,7 +266,7 @@ func K8sGetPods(podstr string, ns string, ants []string, timeout int) ([]string, pods := []string{} log.Printf("K8sGetPods pod=%s ns=%s ants=%v timeout=%d", podstr, ns, ants, timeout) for t := 0; t <= timeout; t++ { - podList, err := k8sClient.K8sClientset.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) + podList, err := k8sClient.K8sClientset.CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{}) if err != nil { log.Errorf("k8s list pods failed. 
error=%s", err) return nil, err From 198b811f5bf01a10bad624ce75744911a31a9fd3 Mon Sep 17 00:00:00 2001 From: Aryan-sharma11 Date: Thu, 25 Jul 2024 01:34:28 +0530 Subject: [PATCH 2/6] feat : add annotate resources flag Signed-off-by: Aryan-sharma11 --- KubeArmor/config/config.go | 15 ++++++++++++--- KubeArmor/core/kubeUpdate.go | 9 ++++++--- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/KubeArmor/config/config.go b/KubeArmor/config/config.go index 1b81c7fb3c..b774c29ccf 100644 --- a/KubeArmor/config/config.go +++ b/KubeArmor/config/config.go @@ -56,9 +56,10 @@ type KubearmorConfig struct { StateAgent bool // enable KubeArmor state agent - AlertThrottling bool // Enable/Disable Alert Throttling - MaxAlertPerSec int // Maximum alerts allowed per second - ThrottleSec int // Number of seconds for which subsequent alerts will be dropped + AlertThrottling bool // Enable/Disable Alert Throttling + MaxAlertPerSec int // Maximum alerts allowed per second + ThrottleSec int // Number of seconds for which subsequent alerts will be dropped + AnnotateResources bool // enable annotations by kubearmor if kubearmor-controller is not present } // GlobalCfg Global configuration for Kubearmor @@ -103,6 +104,7 @@ const ( ConfigAlertThrottling string = "alertThrottling" ConfigMaxAlertPerSec string = "maxAlertPerSec" ConfigThrottleSec string = "throttleSec" + ConfigAnnotateResources string = "annotateResources" ) func readCmdLineParams() { @@ -157,6 +159,8 @@ func readCmdLineParams() { throttleSec := flag.Int(ConfigThrottleSec, 30, "Time period for which subsequent alerts will be dropped (in sec)") + annotateResources := flag.Bool(ConfigAnnotateResources, false, "for kubearmor deployment without kubearmor-controller") + flags := []string{} flag.VisitAll(func(f *flag.Flag) { kv := fmt.Sprintf("%s:%v", f.Name, f.Value) @@ -212,8 +216,12 @@ func readCmdLineParams() { viper.SetDefault(ConfigStateAgent, *stateAgent) viper.SetDefault(ConfigAlertThrottling, *alertThrottling) + viper.SetDefault(ConfigMaxAlertPerSec, *maxAlertPerSec) + viper.SetDefault(ConfigThrottleSec, *throttleSec) + + viper.SetDefault(ConfigAnnotateResources, *annotateResources) } // LoadConfig Load configuration @@ -312,6 +320,7 @@ func LoadConfig() error { GlobalCfg.AlertThrottling = viper.GetBool(ConfigAlertThrottling) GlobalCfg.MaxAlertPerSec = viper.GetInt(ConfigMaxAlertPerSec) GlobalCfg.ThrottleSec = viper.GetInt(ConfigThrottleSec) + GlobalCfg.AnnotateResources = viper.GetBool(ConfigAnnotateResources) kg.Printf("Final Configuration [%+v]", GlobalCfg) diff --git a/KubeArmor/core/kubeUpdate.go b/KubeArmor/core/kubeUpdate.go index f3d345d1a3..e9edc078b7 100644 --- a/KubeArmor/core/kubeUpdate.go +++ b/KubeArmor/core/kubeUpdate.go @@ -839,7 +839,9 @@ func (dm *KubeArmorDaemon) WatchK8sPods() { dm.RuntimeEnforcer.UpdateAppArmorProfiles(pod.Metadata["podName"], "ADDED", appArmorAnnotations, pod.PrivilegedAppArmorProfiles) if updateAppArmor && pod.Annotations["kubearmor-policy"] == "enabled" && dm.OwnerInfo[pod.Metadata["podName"]].Ref != "Pod" { - if dm.OwnerInfo[pod.Metadata["podName"]].Name != "" { + + // patch deployments only when kubearmor-controller is not present + if dm.OwnerInfo[pod.Metadata["podName"]].Name != "" && cfg.GlobalCfg.AnnotateResources { deploymentName := dm.OwnerInfo[pod.Metadata["podName"]].Name // patch the deployment with apparmor annotations if err := K8s.PatchResourceWithAppArmorAnnotations(pod.Metadata["namespaceName"], deploymentName, appArmorAnnotations, dm.OwnerInfo[pod.Metadata["podName"]].Ref); err != 
nil { @@ -860,7 +862,9 @@ func (dm *KubeArmorDaemon) WatchK8sPods() { } if updateAppArmor && prevPolicyEnabled != "enabled" && pod.Annotations["kubearmor-policy"] == "enabled" && dm.OwnerInfo[pod.Metadata["podName"]].Ref != "Pod" { - if dm.OwnerInfo[pod.Metadata["podName"]].Name != "" { + + // patch deployments only when kubearmor-controller is not present + if dm.OwnerInfo[pod.Metadata["podName"]].Name != "" && cfg.GlobalCfg.AnnotateResources { deploymentName := dm.OwnerInfo[pod.Metadata["podName"]].Name // patch the deployment with apparmor annotations if err := K8s.PatchResourceWithAppArmorAnnotations(pod.Metadata["namespaceName"], deploymentName, appArmorAnnotations, dm.OwnerInfo[pod.Metadata["podName"]].Ref); err != nil { @@ -885,7 +889,6 @@ func (dm *KubeArmorDaemon) WatchK8sPods() { if event.Type == "ADDED" { new := true - for _, k8spod := range dm.K8sPods { if k8spod.Metadata["namespaceName"] == pod.Metadata["namespaceName"] && k8spod.Metadata["podName"] == pod.Metadata["podName"] { new = false From 62d8b61a2c281ee854b3c8c93c50501695d63c03 Mon Sep 17 00:00:00 2001 From: Aryan-sharma11 Date: Thu, 25 Jul 2024 01:35:33 +0530 Subject: [PATCH 3/6] remove /sys/kernel/security hostmount Signed-off-by: Aryan-sharma11 --- deployments/get/objects.go | 21 +++++-------------- .../config/manager/manager.yaml | 8 ------- .../internal/controller/resources.go | 10 --------- 3 files changed, 5 insertions(+), 34 deletions(-) diff --git a/deployments/get/objects.go b/deployments/get/objects.go index 24cef66797..102697604d 100644 --- a/deployments/get/objects.go +++ b/deployments/get/objects.go @@ -507,16 +507,6 @@ var KubeArmorControllerCertVolume = corev1.Volume{ }, } -var KubeArmorControllerHostPathVolume = corev1.Volume{ - Name: "sys-path", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/sys/kernel/security", - Type: &hostPathDirectory, - }, - }, -} - var KubeArmorControllerAllowPrivilegeEscalation = false // GetKubeArmorControllerDeployment Function @@ -549,7 +539,6 @@ func GetKubeArmorControllerDeployment(namespace string) *appsv1.Deployment { ServiceAccountName: KubeArmorControllerServiceAccountName, Volumes: []corev1.Volume{ KubeArmorControllerCertVolume, - KubeArmorControllerHostPathVolume, }, Containers: []corev1.Container{ { @@ -600,11 +589,6 @@ func GetKubeArmorControllerDeployment(namespace string) *appsv1.Deployment { ReadOnly: true, MountPath: "/tmp/k8s-webhook-server/serving-certs", }, - { - Name: KubeArmorControllerHostPathVolume.Name, - ReadOnly: true, - MountPath: "/sys/kernel/security", - }, }, SecurityContext: &corev1.SecurityContext{ AllowPrivilegeEscalation: &KubeArmorControllerAllowPrivilegeEscalation, @@ -674,6 +658,11 @@ func GetKubeArmorControllerClusterRole() *rbacv1.ClusterRole { Resources: []string{"pods"}, Verbs: []string{"create", "delete", "get", "patch", "list", "watch", "update"}, }, + { + APIGroups: []string{""}, + Resources: []string{"nodes"}, + Verbs: []string{"get", "list", "watch"}, + }, { APIGroups: []string{"security.kubearmor.com"}, Resources: []string{"kubearmorpolicies", "kubearmorclusterpolicies", "kubearmorhostpolicies"}, diff --git a/pkg/KubeArmorController/config/manager/manager.yaml b/pkg/KubeArmorController/config/manager/manager.yaml index ab0575c621..8bfc3acb4f 100644 --- a/pkg/KubeArmorController/config/manager/manager.yaml +++ b/pkg/KubeArmorController/config/manager/manager.yaml @@ -26,11 +26,6 @@ spec: labels: control-plane: controller-manager spec: - volumes: - - name: sys-path - hostPath: - path: 
-            type: Directory
       securityContext:
         runAsNonRoot: true
         # TODO(user): For common cases that do not require escalating privileges
@@ -74,8 +69,5 @@ spec:
           requests:
             cpu: 10m
             memory: 64Mi
-        volumeMounts:
-          - mountPath: /sys/kernel/security
-            name: sys-path
       serviceAccountName: controller-manager
       terminationGracePeriodSeconds: 10
diff --git a/pkg/KubeArmorOperator/internal/controller/resources.go b/pkg/KubeArmorOperator/internal/controller/resources.go
index 0c99bd0f42..e848be8333 100644
--- a/pkg/KubeArmorOperator/internal/controller/resources.go
+++ b/pkg/KubeArmorOperator/internal/controller/resources.go
@@ -444,16 +444,6 @@ func (clusterWatcher *ClusterWatcher) deployControllerDeployment(deployment *app
 		}
 	} else {
 		deployment.Spec.Template.Spec.NodeSelector = nil
-		for i, container := range deployment.Spec.Template.Spec.Containers {
-			if container.Name == "manager" {
-				for j, mount := range container.VolumeMounts {
-					if mount.MountPath == "/sys/kernel/security" {
-						deployment.Spec.Template.Spec.Containers[i].VolumeMounts = append(deployment.Spec.Template.Spec.Containers[i].VolumeMounts[:j],
-							deployment.Spec.Template.Spec.Containers[i].VolumeMounts[j+1:]...)
-					}
-				}
-			}
-		}
 	}
 	controller, err := clusterWatcher.Client.AppsV1().Deployments(common.Namespace).Get(context.Background(), deployment.Name, metav1.GetOptions{})
 	if isNotfound(err) {

From deb1675892c69ccddc2622bd066730c03baabf86 Mon Sep 17 00:00:00 2001
From: Aryan-sharma11
Date: Thu, 25 Jul 2024 01:36:40 +0530
Subject: [PATCH 4/6] deploy snitch on node restarts

Signed-off-by: Aryan-sharma11
---
 pkg/KubeArmorOperator/common/defaults.go       |  5 ++
 .../internal/controller/cluster.go             | 47 +++++++++++++++++++
 2 files changed, 52 insertions(+)

diff --git a/pkg/KubeArmorOperator/common/defaults.go b/pkg/KubeArmorOperator/common/defaults.go
index dba7e4a45c..d77f44842e 100644
--- a/pkg/KubeArmorOperator/common/defaults.go
+++ b/pkg/KubeArmorOperator/common/defaults.go
@@ -53,6 +53,11 @@ var (
 	SecurityFsLabel string = "kubearmor.io/securityfs"
 	SeccompLabel    string = "kubearmor.io/seccomp"
 
+	// keys of the node taints that signal a restart
+	NotreadyTaint      string = "node.kubernetes.io/not-ready"
+	UnreachableTaint   string = "node.kubernetes.io/unreachable"
+	UnschedulableTaint string = "node.kubernetes.io/unschedulable"
+
 	// if any node with securityfs/lsm present
 	IfNodeWithSecurtiyFs bool = false
 
diff --git a/pkg/KubeArmorOperator/internal/controller/cluster.go b/pkg/KubeArmorOperator/internal/controller/cluster.go
index 811bcdd9a9..9019c3f598 100644
--- a/pkg/KubeArmorOperator/internal/controller/cluster.go
+++ b/pkg/KubeArmorOperator/internal/controller/cluster.go
@@ -109,10 +109,27 @@ func (clusterWatcher *ClusterWatcher) WatchNodes() {
 			}
 		},
 		UpdateFunc: func(oldObj, newObj interface{}) {
+
 			if node, ok := newObj.(*corev1.Node); ok {
 				oldRand := ""
 				if old, ok := oldObj.(*corev1.Node); ok {
 					oldRand = old.Labels[common.RandLabel]
+
+					nodeRestart := checkNodeRestart(node, old)
+					if nodeRestart {
+						runtime := node.Status.NodeInfo.ContainerRuntimeVersion
+						runtime = strings.Split(runtime, ":")[0]
+						clusterWatcher.Log.Infof("Node might have been restarted, redeploying snitch")
+						if val, ok := node.Labels[common.OsLabel]; ok && val == "linux" {
+							log.Infof("Installing snitch on node %s", node.Name)
+							_, err := clusterWatcher.Client.BatchV1().Jobs(common.Namespace).Create(context.Background(), deploySnitch(node.Name, runtime), v1.CreateOptions{})
+							if err != nil {
+								log.Errorf("Cannot run snitch on node %s, error=%s", node.Name, err.Error())
+								return
+							}
+							log.Infof("Snitch was installed on node %s", node.Name)
+						}
+					}
 				}
 				if val, ok := node.Labels[common.OsLabel]; ok && val == "linux" && oldRand != node.Labels[common.RandLabel] {
 					newNode := Node{}
@@ -871,3 +888,33 @@ func UpdateTlsData(config *opv1.KubeArmorConfigSpec) bool {
 
 	return updated
 }
+
+func checkNodeRestart(new, old *corev1.Node) bool {
+
+	oldTaints := false
+	newTaints := false
+
+	for _, val := range old.Spec.Taints {
+		if val.Key == common.NotreadyTaint || val.Key == common.UnreachableTaint || val.Key == common.UnschedulableTaint {
+			oldTaints = true
+			break
+		}
+
+	}
+	for _, val := range new.Spec.Taints {
+		if val.Key == common.NotreadyTaint || val.Key == common.UnreachableTaint || val.Key == common.UnschedulableTaint {
+			newTaints = true
+			break
+		}
+	}
+	/* Based on the observation that when a node is restarted, an update event
+	is generated in which the old node object carries one of the taints
+	"node.kubernetes.io/not-ready", "node.kubernetes.io/unreachable", or "node.kubernetes.io/unschedulable",
+	while the new node object carries none of them.
+	*/
+	if oldTaints && !newTaints {
+		// node might have been restarted
+		return true
+	}
+
+	return false
+}

From 7894d00366df8f3f72ff9351fb0c736e603678fd Mon Sep 17 00:00:00 2001
From: Aryan-sharma11
Date: Thu, 25 Jul 2024 01:37:34 +0530
Subject: [PATCH 5/6] configure ci to build controller when controller pkg is updated

Signed-off-by: Aryan-sharma11
---
 .github/workflows/ci-test-ginkgo.yml           | 35 +++++++++++++++++--
 .github/workflows/ci-test-ubi-image.yml        | 27 +++++++++++++-
 .../informer/nodewatcher.go                    |  3 --
 3 files changed, 58 insertions(+), 7 deletions(-)

diff --git a/.github/workflows/ci-test-ginkgo.yml b/.github/workflows/ci-test-ginkgo.yml
index 698db6d803..5243c182f9 100644
--- a/.github/workflows/ci-test-ginkgo.yml
+++ b/.github/workflows/ci-test-ginkgo.yml
@@ -42,6 +42,14 @@ jobs:
       - uses: actions/setup-go@v5
         with:
           go-version-file: 'KubeArmor/go.mod'
+
+      - name: Check what paths were updated
+        uses: dorny/paths-filter@v2
+        id: filter
+        with:
+          filters: |
+            controller:
+              - 'pkg/KubeArmorController/**'
 
       - name: Install the latest LLVM toolchain
         run: ./.github/workflows/install-llvm.sh
@@ -60,6 +68,10 @@ jobs:
         working-directory: pkg/KubeArmorOperator
         run: |
           make docker-build
+
+      - name: Build KubeArmorController
+        if: steps.filter.outputs.controller == 'true'
+        run: make -C pkg/KubeArmorController/ docker-build TAG=latest
 
       - name: deploy pre existing pod
         run: |
@@ -69,11 +81,15 @@
 
       - name: Run KubeArmor
         run: |
-          if [ ${{ matrix.runtime }} == "containerd" ]; then
+          if [[ ${{ matrix.runtime }} == "containerd" ]]; then
            docker save kubearmor/kubearmor-init:latest | sudo k3s ctr images import -
            docker save kubearmor/kubearmor:latest | sudo k3s ctr images import -
            docker save kubearmor/kubearmor-operator:latest | sudo k3s ctr images import -
            docker save kubearmor/kubearmor-snitch:latest | sudo k3s ctr images import -
-
+
+            if [[ ${{ steps.filter.outputs.controller }} == 'true' ]]; then
+              docker save kubearmor/kubearmor-controller:latest | sudo k3s ctr images import -
+            fi
           else
            if [ ${{ matrix.runtime }} == "crio" ]; then
              docker save kubearmor/kubearmor-init:latest | sudo podman load
              docker save kubearmor/kubearmor:latest | sudo podman load
              sudo podman tag localhost/latest:latest docker.io/kubearmor/kubearmor:latest
              docker save kubearmor/kubearmor-operator:latest | sudo podman load
              sudo podman tag localhost/latest:latest docker.io/kubearmor/kubearmor-operator:latest
              docker save kubearmor/kubearmor-snitch:latest | sudo podman load
              sudo podman tag localhost/latest:latest docker.io/kubearmor/kubearmor-snitch:latest
+              if [ ${{ steps.filter.outputs.controller }} == 'true' ]; then
+                docker save kubearmor/kubearmor-controller:latest | sudo podman load
+                sudo podman tag localhost/latest:latest docker.io/kubearmor/kubearmor-controller:latest
+              fi
            fi
          fi
-          helm upgrade --install kubearmor-operator ./deployments/helm/KubeArmorOperator -n kubearmor --create-namespace --set kubearmorOperator.image.tag=latest
+          docker system prune -a -f
+          docker buildx prune -a -f
+          helm upgrade --install kubearmor-operator ./deployments/helm/KubeArmorOperator -n kubearmor --create-namespace --set kubearmorOperator.image.tag=latest
          kubectl wait --for=condition=ready --timeout=5m -n kubearmor pod -l kubearmor-app=kubearmor-operator
          kubectl get pods -A
-          kubectl apply -f pkg/KubeArmorOperator/config/samples/kubearmor-test.yaml
+          if [[ ${{ steps.filter.outputs.controller }} == 'true' ]]; then
+            kubectl apply -f pkg/KubeArmorOperator/config/samples/kubearmor-test.yaml --dry-run=client -o json | \
+            jq '.spec.kubearmorControllerImage.imagePullPolicy = "Never"' | \
+            kubectl apply -f -
+          else
+            kubectl apply -f pkg/KubeArmorOperator/config/samples/kubearmor-test.yaml
+          fi
+
          kubectl wait -n kubearmor --timeout=5m --for=jsonpath='{.status.phase}'=Running kubearmorconfigs/kubearmorconfig-test
          kubectl wait --timeout=7m --for=condition=ready pod -l kubearmor-app,kubearmor-app!=kubearmor-snitch,kubearmor-app!=kubearmor-controller -n kubearmor
          kubectl wait --timeout=1m --for=condition=ready pod -l kubearmor-app=kubearmor-controller -n kubearmor
diff --git a/.github/workflows/ci-test-ubi-image.yml b/.github/workflows/ci-test-ubi-image.yml
index b1f49f0673..0c9978963a 100644
--- a/.github/workflows/ci-test-ubi-image.yml
+++ b/.github/workflows/ci-test-ubi-image.yml
@@ -45,6 +45,14 @@ jobs:
      - uses: actions/setup-go@v5
        with:
          go-version-file: 'KubeArmor/go.mod'
+
+      - name: Check what paths were updated
+        uses: dorny/paths-filter@v2
+        id: filter
+        with:
+          filters: |
+            controller:
+              - 'pkg/KubeArmorController/**'
 
      - name: Install the latest LLVM toolchain
        run: ./.github/workflows/install-llvm.sh
@@ -63,6 +71,10 @@ jobs:
        working-directory: pkg/KubeArmorOperator
        run: |
          make docker-build
+
+      - name: Build KubeArmorController
+        if: steps.filter.outputs.controller == 'true'
+        run: make -C pkg/KubeArmorController/ docker-build TAG=latest
 
      - name: Run KubeArmor
        run: |
          docker save kubearmor/kubearmor-ubi:latest | sudo podman load
          docker save kubearmor/kubearmor-operator:latest | sudo podman load
          docker save kubearmor/kubearmor-snitch:latest | sudo podman load
+
+          if [ ${{ steps.filter.outputs.controller }} == 'true' ]; then
+            docker save kubearmor/kubearmor-controller:latest | sudo podman load
+          fi
+
          helm upgrade --install kubearmor-operator ./deployments/helm/KubeArmorOperator -n kubearmor --create-namespace --set kubearmorOperator.image.tag=latest
          kubectl get pods -A
          kubectl wait --for=condition=ready --timeout=5m -n kubearmor pod -l kubearmor-app=kubearmor-operator
-          kubectl apply -f pkg/KubeArmorOperator/config/samples/kubearmor-ubi-test.yaml
+
+          if [[ ${{ steps.filter.outputs.controller }} == 'true' ]]; then
+            kubectl apply -f pkg/KubeArmorOperator/config/samples/kubearmor-ubi-test.yaml --dry-run=client -o json | \
+            jq '.spec.kubearmorControllerImage.imagePullPolicy = "Never"' | \
+            kubectl apply -f -
+          else
+            kubectl apply -f pkg/KubeArmorOperator/config/samples/kubearmor-ubi-test.yaml
+          fi
+
          kubectl wait -n kubearmor --timeout=5m --for=jsonpath='{.status.phase}'=Running kubearmorconfigs/kubearmorconfig-test
          kubectl wait --timeout=7m --for=condition=ready pod -l kubearmor-app,kubearmor-app!=kubearmor-snitch,kubearmor-app!=kubearmor-controller -n kubearmor
          kubectl wait --timeout=1m --for=condition=ready pod -l kubearmor-app=kubearmor-controller -n kubearmor
diff --git a/pkg/KubeArmorController/informer/nodewatcher.go b/pkg/KubeArmorController/informer/nodewatcher.go
index bfca7eb034..f90af078dc 100644
--- a/pkg/KubeArmorController/informer/nodewatcher.go
+++ b/pkg/KubeArmorController/informer/nodewatcher.go
@@ -76,9 +76,6 @@ func NodeWatcher(c *kubernetes.Clientset, cluster *types.Cluster, log logr.Logge
 			}
 			cluster.ClusterLock.Lock()
 			defer cluster.ClusterLock.Unlock()
-			if _, ok := cluster.Nodes[node.Name]; !ok {
-				return
-			}
 
 			if enforcer, ok := node.Labels["kubearmor.io/enforcer"]; ok {
 				if _, ok := cluster.Nodes[node.Name]; ok {

From 3943cbc00eebccb707948325370b0d356483cd73 Mon Sep 17 00:00:00 2001
From: Aryan-sharma11
Date: Tue, 30 Jul 2024 09:11:04 +0530
Subject: [PATCH 6/6] set annotateResources=true in Makefile

Signed-off-by: Aryan-sharma11
---
 KubeArmor/Makefile                              | 2 +-
 pkg/KubeArmorController/informer/nodewatcher.go | 2 --
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/KubeArmor/Makefile b/KubeArmor/Makefile
index d550ec8ca1..8ed13c0456 100644
--- a/KubeArmor/Makefile
+++ b/KubeArmor/Makefile
@@ -51,7 +51,7 @@ run: build
 	cd $(CURDIR); sudo rm -f /tmp/kubearmor.log
 	cd $(CURDIR)/BPF; make clean
 	cd $(CURDIR)/BPF; make
-	cd $(CURDIR); DEBUG=true sudo -E ./kubearmor -logPath=/tmp/kubearmor.log -enableKubeArmorPolicy -enableKubeArmorHostPolicy -hostVisibility=process,file,network,capabilities -defaultFilePosture block -defaultCapabilitiesPosture block -defaultNetworkPosture block -hostDefaultFilePosture block -hostDefaultCapabilitiesPosture block -hostDefaultNetworkPosture block
+	cd $(CURDIR); DEBUG=true sudo -E ./kubearmor -logPath=/tmp/kubearmor.log -enableKubeArmorPolicy -enableKubeArmorHostPolicy -hostVisibility=process,file,network,capabilities -defaultFilePosture block -defaultCapabilitiesPosture block -defaultNetworkPosture block -hostDefaultFilePosture block -hostDefaultCapabilitiesPosture block -hostDefaultNetworkPosture block -annotateResources=true
 
 .PHONY: run-container
 run-container: build
diff --git a/pkg/KubeArmorController/informer/nodewatcher.go b/pkg/KubeArmorController/informer/nodewatcher.go
index f90af078dc..8054f5ab23 100644
--- a/pkg/KubeArmorController/informer/nodewatcher.go
+++ b/pkg/KubeArmorController/informer/nodewatcher.go
@@ -147,5 +147,3 @@ func NodeWatcher(c *kubernetes.Clientset, cluster *types.Cluster, log logr.Logge
 
 	inf.Run(wait.NeverStop)
 }
-
-// add fall back logix for pod annotataions
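Note on patch 2's config plumbing: the parsed command-line value is installed only as a viper default, so a value coming from a mounted config file or the environment can still override it before LoadConfig copies it into GlobalCfg. The following is a minimal standalone sketch of that precedence pattern; the package wiring and key name here are illustrative, not KubeArmor code.

package main

import (
	"flag"
	"fmt"

	"github.com/spf13/viper"
)

func main() {
	// Register the flag with its built-in default, as readCmdLineParams does.
	annotate := flag.Bool("annotateResources", false,
		"annotate resources when kubearmor-controller is not present")
	flag.Parse()

	// The flag's parsed value becomes only a *default* for viper, so any
	// config-file or environment source registered with viper keeps
	// precedence over the command line default.
	viper.SetDefault("annotateResources", *annotate)

	// LoadConfig-style read: whichever source wins inside viper is used.
	fmt.Println("AnnotateResources =", viper.GetBool("annotateResources"))
}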
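Note on patch 4's restart detection: the operator keys on a single taint transition, where the old Node object in the informer update carries one of the not-ready/unreachable/unschedulable taints while the new object carries none. The standalone Go sketch below illustrates that transition; the helper names and the main function are ours for illustration, and only the taint keys and the old-tainted/new-untainted comparison mirror checkNodeRestart.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// Taint keys mirrored from patch 4 (pkg/KubeArmorOperator/common/defaults.go).
const (
	notReadyTaint      = "node.kubernetes.io/not-ready"
	unreachableTaint   = "node.kubernetes.io/unreachable"
	unschedulableTaint = "node.kubernetes.io/unschedulable"
)

// hasRestartTaint reports whether a node carries any of the taints that the
// node controller applies while a node is down or still coming back up.
func hasRestartTaint(n *corev1.Node) bool {
	for _, t := range n.Spec.Taints {
		switch t.Key {
		case notReadyTaint, unreachableTaint, unschedulableTaint:
			return true
		}
	}
	return false
}

// likelyRestarted mirrors checkNodeRestart: the old object is tainted and the
// new one is not, i.e. the node just became ready again.
func likelyRestarted(old, cur *corev1.Node) bool {
	return hasRestartTaint(old) && !hasRestartTaint(cur)
}

func main() {
	old := &corev1.Node{Spec: corev1.NodeSpec{Taints: []corev1.Taint{
		{Key: notReadyTaint, Effect: corev1.TaintEffectNoExecute},
	}}}
	cur := &corev1.Node{} // taints cleared once the node rejoined

	// Prints true: this is the transition on which the operator redeploys snitch.
	fmt.Println(likelyRestarted(old, cur))
}

One caveat this heuristic accepts: the same transition also occurs when a cordoned node is uncordoned or a network partition heals without a reboot, so a redundant snitch job may be created; the job is idempotent from the operator's point of view since it only re-detects the node's enforcer and runtime.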