Skip to content

Commit

Permalink
Merge pull request #1831 from daemon1024/dogfood
Browse files Browse the repository at this point in the history
Enabling BPFLSM based KSP protection on KubeArmor itself
  • Loading branch information
Aryan-sharma11 authored Nov 5, 2024
2 parents 4731b3c + c85b541 commit 3574106
Show file tree
Hide file tree
Showing 17 changed files with 70 additions and 36 deletions.
8 changes: 5 additions & 3 deletions KubeArmor/BPF/enforcer.bpf.c
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,10 @@ int BPF_PROG(enforce_proc, struct linux_binprm *bprm, int ret) {
if (src_offset == NULL)
fromSourceCheck = false;

void *src_ptr = &src_buf->buf[*src_offset];
void *src_ptr;
if (src_buf->buf[*src_offset]) {
src_ptr = &src_buf->buf[*src_offset];
}
if (src_ptr == NULL)
fromSourceCheck = false;

Expand Down Expand Up @@ -152,10 +155,9 @@ int BPF_PROG(enforce_proc, struct linux_binprm *bprm, int ret) {
goto decision;
}


// match exec name
struct qstr d_name;
d_name = BPF_CORE_READ(f_path.dentry,d_name);
d_name = BPF_CORE_READ(f_path.dentry, d_name);
bpf_map_update_elem(&bufk, &two, z, BPF_ANY);
bpf_probe_read_str(pk->path, MAX_STRING_SIZE, d_name.name);

Expand Down
31 changes: 16 additions & 15 deletions KubeArmor/BPF/shared.h
Original file line number Diff line number Diff line change
Expand Up @@ -272,6 +272,9 @@ static inline void get_outer_key(struct outer_key *pokey,
struct task_struct *t) {
pokey->pid_ns = get_task_pid_ns_id(t);
pokey->mnt_ns = get_task_mnt_ns_id(t);
// TODO: Use cgroup ns as well for host process identification to support enforcement on deployments using hostpidns
// u32 cg_ns = BPF_CORE_READ(t, nsproxy, cgroup_ns, ns).inum;
// if (pokey->pid_ns == PROC_PID_INIT_INO && cg_ns == PROC_CGROUP_INIT_INO) {
if (pokey->pid_ns == PROC_PID_INIT_INO) {
pokey->pid_ns = 0;
pokey->mnt_ns = 0;
Expand All @@ -288,20 +291,13 @@ static __always_inline u32 init_context(event *event_data) {
event_data->host_ppid = get_task_ppid(task);
event_data->host_pid = bpf_get_current_pid_tgid() >> 32;

u32 pid = get_task_ns_tgid(task);
if (event_data->host_pid == pid) { // host
event_data->pid_id = 0;
event_data->mnt_id = 0;

event_data->ppid = get_task_ppid(task);
event_data->pid = bpf_get_current_pid_tgid() >> 32;
} else { // container
event_data->pid_id = get_task_pid_ns_id(task);
event_data->mnt_id = get_task_mnt_ns_id(task);
struct outer_key okey;
get_outer_key(&okey, task);
event_data->pid_id = okey.pid_ns;
event_data->mnt_id = okey.mnt_ns;

event_data->ppid = get_task_ns_ppid(task);
event_data->pid = pid;
}
event_data->ppid = get_task_ppid(task);
event_data->pid = get_task_ns_tgid(task);

event_data->uid = bpf_get_current_uid_gid();

Expand Down Expand Up @@ -487,10 +483,15 @@ static inline int match_and_enforce_path_hooks(struct path *f_path, u32 id,
if (src_offset == NULL)
fromSourceCheck = false;

void *ptr = &src_buf->buf[*src_offset];
void *src_ptr;
if (src_buf->buf[*src_offset]) {
src_ptr = &src_buf->buf[*src_offset];
}
if (src_ptr == NULL)
fromSourceCheck = false;

if (fromSourceCheck) {
bpf_probe_read_str(store->source, MAX_STRING_SIZE, ptr);
bpf_probe_read_str(store->source, MAX_STRING_SIZE, src_ptr);

val = bpf_map_lookup_elem(inner, store);

Expand Down
9 changes: 9 additions & 0 deletions KubeArmor/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,8 @@ type KubearmorConfig struct {
MaxAlertPerSec int // Maximum alerts allowed per second
ThrottleSec int // Number of seconds for which subsequent alerts will be dropped
AnnotateResources bool // enable annotations by kubearmor if kubearmor-controller is not present

ProcFsMount string // path where procfs is hosted
}

// GlobalCfg Global configuration for Kubearmor
Expand Down Expand Up @@ -105,6 +107,7 @@ const (
ConfigMaxAlertPerSec string = "maxAlertPerSec"
ConfigThrottleSec string = "throttleSec"
ConfigAnnotateResources string = "annotateResources"
ConfigProcFsMount string = "procfsMount"
)

func readCmdLineParams() {
Expand Down Expand Up @@ -161,6 +164,8 @@ func readCmdLineParams() {

annotateResources := flag.Bool(ConfigAnnotateResources, false, "for kubearmor deployment without kubearmor-controller")

	procFsMount := flag.String(ConfigProcFsMount, "/proc", "Path where the procfs is mounted")

flags := []string{}
flag.VisitAll(func(f *flag.Flag) {
kv := fmt.Sprintf("%s:%v", f.Name, f.Value)
Expand Down Expand Up @@ -222,6 +227,8 @@ func readCmdLineParams() {
viper.SetDefault(ConfigThrottleSec, *throttleSec)

viper.SetDefault(ConfigAnnotateResources, *annotateResources)

viper.SetDefault(ConfigProcFsMount, *procFsMount)
}

// LoadConfig Load configuration
Expand Down Expand Up @@ -322,6 +329,8 @@ func LoadConfig() error {
GlobalCfg.ThrottleSec = viper.GetInt(ConfigThrottleSec)
GlobalCfg.AnnotateResources = viper.GetBool(ConfigAnnotateResources)

GlobalCfg.ProcFsMount = viper.GetString(ConfigProcFsMount)

kg.Printf("Final Configuration [%+v]", GlobalCfg)

return nil
Expand Down
5 changes: 3 additions & 2 deletions KubeArmor/core/containerdHandler.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import (
"context"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"time"
Expand Down Expand Up @@ -193,13 +194,13 @@ func (ch *ContainerdHandler) GetContainerInfo(ctx context.Context, containerID s

pid := strconv.Itoa(int(taskRes.Processes[0].Pid))

if data, err := os.Readlink("/proc/" + pid + "/ns/pid"); err == nil {
if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, pid, "/ns/pid")); err == nil {
if _, err := fmt.Sscanf(data, "pid:[%d]\n", &container.PidNS); err != nil {
kg.Warnf("Unable to get PidNS (%s, %s, %s)", containerID, pid, err.Error())
}
}

if data, err := os.Readlink("/proc/" + pid + "/ns/mnt"); err == nil {
if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, pid, "/ns/mnt")); err == nil {
if _, err := fmt.Sscanf(data, "mnt:[%d]\n", &container.MntNS); err != nil {
kg.Warnf("Unable to get MntNS (%s, %s, %s)", containerID, pid, err.Error())
}
Expand Down
5 changes: 3 additions & 2 deletions KubeArmor/core/crioHandler.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strconv"
"time"

Expand Down Expand Up @@ -130,15 +131,15 @@ func (ch *CrioHandler) GetContainerInfo(ctx context.Context, containerID string,

pid := strconv.Itoa(containerInfo.Pid)

if data, err := os.Readlink("/proc/" + pid + "/ns/pid"); err == nil {
if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, pid, "/ns/pid")); err == nil {
if _, err := fmt.Sscanf(data, "pid:[%d]\n", &container.PidNS); err != nil {
kg.Warnf("Unable to get PidNS (%s, %s, %s)", containerID, pid, err.Error())
}
} else {
return container, err
}

if data, err := os.Readlink("/proc/" + pid + "/ns/mnt"); err == nil {
if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, pid, "/ns/mnt")); err == nil {
if _, err := fmt.Sscanf(data, "mnt:[%d]\n", &container.MntNS); err != nil {
kg.Warnf("Unable to get MntNS (%s, %s, %s)", containerID, pid, err.Error())
}
Expand Down
5 changes: 3 additions & 2 deletions KubeArmor/core/dockerHandler.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import (
"errors"
"fmt"
"os"
"path/filepath"
"slices"
"strconv"
"strings"
Expand Down Expand Up @@ -144,13 +145,13 @@ func (dh *DockerHandler) GetContainerInfo(containerID string, OwnerInfo map[stri

pid := strconv.Itoa(inspect.State.Pid)

if data, err := os.Readlink("/proc/" + pid + "/ns/pid"); err == nil {
if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, pid, "/ns/pid")); err == nil {
if _, err := fmt.Sscanf(data, "pid:[%d]\n", &container.PidNS); err != nil {
kg.Warnf("Unable to get PidNS (%s, %s, %s)", containerID, pid, err.Error())
}
}

if data, err := os.Readlink("/proc/" + pid + "/ns/mnt"); err == nil {
if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, pid, "/ns/mnt")); err == nil {
if _, err := fmt.Sscanf(data, "mnt:[%d]\n", &container.MntNS); err != nil {
kg.Warnf("Unable to get MntNS (%s, %s, %s)", containerID, pid, err.Error())
}
Expand Down
6 changes: 3 additions & 3 deletions KubeArmor/core/kubeUpdate.go
Original file line number Diff line number Diff line change
Expand Up @@ -731,9 +731,9 @@ func (dm *KubeArmorDaemon) WatchK8sPods() {
}

// exception: kubearmor
if _, ok := pod.Labels["kubearmor-app"]; ok {
pod.Annotations["kubearmor-policy"] = "audited"
}
// if _, ok := pod.Labels["kubearmor-app"]; ok {
// pod.Annotations["kubearmor-policy"] = "audited"
// }

// == Visibility == //

Expand Down
4 changes: 2 additions & 2 deletions KubeArmor/enforcer/appArmorEnforcer.go
Original file line number Diff line number Diff line change
Expand Up @@ -114,11 +114,11 @@ profile apparmor-default flags=(attach_disconnected,mediate_deleted) {

existingProfiles := []string{}

if pids, err := os.ReadDir(filepath.Clean("/proc")); err == nil {
if pids, err := os.ReadDir(filepath.Clean(cfg.GlobalCfg.ProcFsMount)); err == nil {
for _, f := range pids {
if f.IsDir() {
if _, err := strconv.Atoi(f.Name()); err == nil {
if content, err := os.ReadFile(filepath.Clean("/proc/" + f.Name() + "/attr/current")); err == nil {
if content, err := os.ReadFile(filepath.Clean(cfg.GlobalCfg.ProcFsMount + "/" + f.Name() + "/attr/current")); err == nil {
line := strings.Split(string(content), "\n")[0]
words := strings.Split(line, " ")

Expand Down
4 changes: 2 additions & 2 deletions KubeArmor/enforcer/bpflsm/enforcer.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,8 @@ import (
tp "github.com/kubearmor/KubeArmor/KubeArmor/types"
)

//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc clang enforcer ../../BPF/enforcer.bpf.c -- -I/usr/include/ -O2 -g
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc clang enforcer_path ../../BPF/enforcer_path.bpf.c -- -I/usr/include/ -O2 -g
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc clang enforcer ../../BPF/enforcer.bpf.c -- -I/usr/include/ -O2 -g -fno-stack-protector
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -cc clang enforcer_path ../../BPF/enforcer_path.bpf.c -- -I/usr/include/ -O2 -g -fno-stack-protector

// ===================== //
// == BPFLSM Enforcer == //
Expand Down
Binary file modified KubeArmor/enforcer/bpflsm/enforcer_bpfeb.o
Binary file not shown.
Binary file modified KubeArmor/enforcer/bpflsm/enforcer_bpfel.o
Binary file not shown.
Binary file modified KubeArmor/enforcer/bpflsm/enforcer_path_bpfeb.o
Binary file not shown.
Binary file modified KubeArmor/enforcer/bpflsm/enforcer_path_bpfel.o
Binary file not shown.
6 changes: 5 additions & 1 deletion KubeArmor/main_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ import (

var clusterPtr, gRPCPtr, logPathPtr *string
var enableKubeArmorPolicyPtr, enableKubeArmorHostPolicyPtr, enableKubeArmorVMPtr, coverageTestPtr, enableK8sEnv, tlsEnabled *bool
var defaultFilePosturePtr, defaultCapabilitiesPosturePtr, defaultNetworkPosturePtr, hostDefaultCapabilitiesPosturePtr, hostDefaultNetworkPosturePtr, hostDefaultFilePosturePtr *string
var defaultFilePosturePtr, defaultCapabilitiesPosturePtr, defaultNetworkPosturePtr, hostDefaultCapabilitiesPosturePtr, hostDefaultNetworkPosturePtr, hostDefaultFilePosturePtr, procFsMountPtr *string

func init() {
// options (string)
Expand All @@ -32,6 +32,8 @@ func init() {
hostDefaultNetworkPosturePtr = flag.String("hostDefaultNetworkPosture", "block", "configuring default enforcement action in global network context {allow|audit|block}")
hostDefaultCapabilitiesPosturePtr = flag.String("hostDefaultCapabilitiesPosture", "block", "configuring default enforcement action in global capability context {allow|audit|block}")

	procFsMountPtr = flag.String("procfsMount", "/proc", "Path where the procfs is mounted")

// options (boolean)
enableKubeArmorPolicyPtr = flag.Bool("enableKubeArmorPolicy", true, "enabling KubeArmorPolicy")
enableKubeArmorHostPolicyPtr = flag.Bool("enableKubeArmorHostPolicy", true, "enabling KubeArmorHostPolicy")
Expand All @@ -42,6 +44,7 @@ func init() {

// options (boolean)
coverageTestPtr = flag.Bool("coverageTest", false, "enabling CoverageTest")

}

// TestMain - test to drive external testing coverage
Expand All @@ -64,6 +67,7 @@ func TestMain(t *testing.T) {
fmt.Sprintf("-enableKubeArmorHostPolicy=%s", strconv.FormatBool(*enableKubeArmorHostPolicyPtr)),
fmt.Sprintf("-coverageTest=%s", strconv.FormatBool(*coverageTestPtr)),
fmt.Sprintf("-tlsEnabled=%s", strconv.FormatBool(*tlsEnabled)),
fmt.Sprintf("-procfsMount=%s", *procFsMountPtr),
}

t.Log("[INFO] Executed KubeArmor")
Expand Down
7 changes: 4 additions & 3 deletions KubeArmor/monitor/processTree.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ package monitor

import (
"os"
"path/filepath"
"strconv"
"strings"
"sync"
Expand Down Expand Up @@ -231,7 +232,7 @@ func (mon *SystemMonitor) GetParentExecPath(containerID string, ctx SyscallConte

if readlink {
// just in case that it couldn't still get the full path
if data, err := os.Readlink("/proc/" + strconv.FormatUint(uint64(ctx.HostPPID), 10) + "/exe"); err == nil && data != "" && data != "/" {
if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, strconv.FormatUint(uint64(ctx.HostPPID), 10), "/exe")); err == nil && data != "" && data != "/" {
// // Store it in the ActiveHostPidMap so we don't need to read procfs again
// // We don't call BuildPidNode Here cause that will put this into a cyclic function call loop
// if pidMap, ok := ActiveHostPidMap[containerID]; ok {
Expand Down Expand Up @@ -276,7 +277,7 @@ func (mon *SystemMonitor) GetExecPath(containerID string, ctx SyscallContext, re

if readlink {
// just in case that it couldn't still get the full path
if data, err := os.Readlink("/proc/" + strconv.FormatUint(uint64(ctx.HostPID), 10) + "/exe"); err == nil && data != "" && data != "/" {
if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, strconv.FormatUint(uint64(ctx.HostPID), 10), "/exe")); err == nil && data != "" && data != "/" {
// // Store it in the ActiveHostPidMap so we don't need to read procfs again
// if pidMap, ok := ActiveHostPidMap[containerID]; ok {
// if node, ok := pidMap[ctx.HostPID]; ok {
Expand Down Expand Up @@ -318,7 +319,7 @@ func (mon *SystemMonitor) GetCommand(containerID string, ctx SyscallContext, rea

if readlink {
// just in case that it couldn't still get the full path
if data, err := os.Readlink("/proc/" + strconv.FormatUint(uint64(ctx.HostPID), 10) + "/exe"); err == nil && data != "" && data != "/" {
if data, err := os.Readlink(filepath.Join(cfg.GlobalCfg.ProcFsMount, strconv.FormatUint(uint64(ctx.HostPID), 10), "/exe")); err == nil && data != "" && data != "/" {
return data
} else if err != nil {
mon.Logger.Debugf("Could not read path from procfs due to %s", err.Error())
Expand Down
2 changes: 1 addition & 1 deletion deployments/get/objects.go
Original file line number Diff line number Diff line change
Expand Up @@ -264,6 +264,7 @@ func GenerateDaemonSet(env, namespace string) *appsv1.DaemonSet {
var terminationGracePeriodSeconds = int64(60)
var args = []string{
"-gRPC=" + strconv.Itoa(int(port)),
"-procfsMount=/host/procfs",
}

var containerVolumeMounts = []corev1.VolumeMount{
Expand Down Expand Up @@ -381,7 +382,6 @@ func GenerateDaemonSet(env, namespace string) *appsv1.DaemonSet {
Operator: "Exists",
},
},
HostPID: true,
HostNetwork: true,
RestartPolicy: "Always",
DNSPolicy: "ClusterFirstWithHostNet",
Expand Down
14 changes: 14 additions & 0 deletions pkg/KubeArmorOperator/common/defaults.go
Original file line number Diff line number Diff line change
Expand Up @@ -237,13 +237,27 @@ var CommonVolumes = []corev1.Volume{
},
},
},
{
Name: "proc-fs-mount",
VolumeSource: corev1.VolumeSource{
HostPath: &corev1.HostPathVolumeSource{
Path: "/proc",
Type: &HostPathDirectory,
},
},
},
}

var CommonVolumesMount = []corev1.VolumeMount{
{
Name: "sys-kernel-debug-path",
MountPath: "/sys/kernel/debug",
},
{
Name: "proc-fs-mount",
MountPath: "/host/procfs",
ReadOnly: true,
},
}

var KubeArmorCaVolume = []corev1.Volume{
Expand Down

0 comments on commit 3574106

Please sign in to comment.