From 39a10853b75bda62387840c43b0b4964c2559f8e Mon Sep 17 00:00:00 2001 From: Amery Hung Date: Thu, 29 May 2025 12:02:19 -0700 Subject: [PATCH 1/4] bpf: Allow verifier to fixup kernel module kfuncs Allow verifier to fixup kfuncs in kernel module to support kfuncs with __prog arguments. Currently, special kfuncs and kfuncs with __prog arguments are kernel kfuncs. As there is no safety reason that prevents a kernel module kfunc from accessing prog->aux, allow it by removing the kernel BTF check. Signed-off-by: Amery Hung --- kernel/bpf/verifier.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index e892df386eed..d5f1046d08b7 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -21889,8 +21889,7 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, if (!bpf_jit_supports_far_kfunc_call()) insn->imm = BPF_CALL_IMM(desc->addr); - if (insn->off) - return 0; + if (desc->func_id == special_kfunc_list[KF_bpf_obj_new_impl] || desc->func_id == special_kfunc_list[KF_bpf_percpu_obj_new_impl]) { struct btf_struct_meta *kptr_struct_meta = env->insn_aux_data[insn_idx].kptr_struct_meta; From e2d2b87d5d2fa3b77e6c19ca0b9c05a7a20296db Mon Sep 17 00:00:00 2001 From: Amery Hung Date: Tue, 30 Sep 2025 13:32:45 -0700 Subject: [PATCH 2/4] bpf: Support associating BPF program with struct_ops Add a new BPF command BPF_STRUCT_OPS_ASSOCIATE_PROG to allow associating a BPF program with a struct_ops. This command takes a file descriptor of a struct_ops map and a BPF program and sets prog->aux->st_ops_assoc to the kdata of the struct_ops map. The command does not accept a struct_ops program or a non-struct_ops map. Programs of a struct_ops map are automatically associated with the map during map update. If a program is shared between two struct_ops maps, the first one will be the map associated with the program. The associated struct_ops map, once set, cannot be changed later. 
This restriction may be lifted in the future if there is a use case. Each associated program except struct_ops programs of the map will take a refcount on the map to pin it so that prog->aux->st_ops_assoc, if set, is always valid. However, it is not guaranteed whether the map members are fully updated nor whether it is attached. For example, a BPF program can be associated with a struct_ops map before map_update. The struct_ops implementer will be responsible for maintaining and checking the state of the associated struct_ops map before accessing it. Signed-off-by: Amery Hung --- include/linux/bpf.h | 11 ++++++++++ include/uapi/linux/bpf.h | 16 ++++++++++++++ kernel/bpf/bpf_struct_ops.c | 32 ++++++++++++++++++++++++++++ kernel/bpf/core.c | 6 ++++++ kernel/bpf/syscall.c | 38 ++++++++++++++++++++++++++++++++++ tools/include/uapi/linux/bpf.h | 16 ++++++++++++++ 6 files changed, 119 insertions(+) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index a98c83346134..d5052745ffc6 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1710,6 +1710,8 @@ struct bpf_prog_aux { struct rcu_head rcu; }; struct bpf_stream stream[2]; + struct mutex st_ops_assoc_mutex; + void *st_ops_assoc; }; struct bpf_prog { @@ -2010,6 +2012,8 @@ static inline void bpf_module_put(const void *data, struct module *owner) module_put(owner); } int bpf_struct_ops_link_create(union bpf_attr *attr); +int bpf_struct_ops_assoc_prog(struct bpf_map *map, struct bpf_prog *prog); +void bpf_struct_ops_disassoc_prog(struct bpf_prog *prog); u32 bpf_struct_ops_id(const void *kdata); #ifdef CONFIG_NET @@ -2057,6 +2061,13 @@ static inline int bpf_struct_ops_link_create(union bpf_attr *attr) { return -EOPNOTSUPP; } +static inline int bpf_struct_ops_assoc_prog(struct bpf_map *map, struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} +static inline void bpf_struct_ops_disassoc_prog(struct bpf_prog *prog) +{ +} static inline void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map) 
{ } diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index ae83d8649ef1..1e76fa22dd61 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -918,6 +918,16 @@ union bpf_iter_link_info { * Number of bytes read from the stream on success, or -1 if an * error occurred (in which case, *errno* is set appropriately). * + * BPF_STRUCT_OPS_ASSOCIATE_PROG + * Description + * Associate a BPF program with a struct_ops map. The struct_ops + * map is identified by *map_fd* and the BPF program is + * identified by *prog_fd*. + * + * Return + * 0 on success or -1 if an error occurred (in which case, + * *errno* is set appropriately). + * * NOTES * eBPF objects (maps and programs) can be shared between processes. * @@ -974,6 +984,7 @@ enum bpf_cmd { BPF_PROG_BIND_MAP, BPF_TOKEN_CREATE, BPF_PROG_STREAM_READ_BY_FD, + BPF_STRUCT_OPS_ASSOCIATE_PROG, __MAX_BPF_CMD, }; @@ -1890,6 +1901,11 @@ union bpf_attr { __u32 prog_fd; } prog_stream_read; + struct { + __u32 map_fd; + __u32 prog_fd; + } struct_ops_assoc_prog; + } __attribute__((aligned(8))); /* The description below is an attempt at providing documentation to eBPF diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index a41e6730edcf..e57428e1653b 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -528,6 +528,7 @@ static void bpf_struct_ops_map_put_progs(struct bpf_struct_ops_map *st_map) for (i = 0; i < st_map->funcs_cnt; i++) { if (!st_map->links[i]) break; + bpf_struct_ops_disassoc_prog(st_map->links[i]->prog); bpf_link_put(st_map->links[i]); st_map->links[i] = NULL; } @@ -801,6 +802,11 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, goto reset_unlock; } + /* Don't stop a program from being reused. prog->aux->st_ops_assoc + * will point to the first struct_ops kdata. 
+ */ + bpf_struct_ops_assoc_prog(&st_map->map, prog); + link = kzalloc(sizeof(*link), GFP_USER); if (!link) { bpf_prog_put(prog); @@ -1394,6 +1400,32 @@ int bpf_struct_ops_link_create(union bpf_attr *attr) return err; } +int bpf_struct_ops_assoc_prog(struct bpf_map *map, struct bpf_prog *prog) +{ + struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map; + void *kdata = &st_map->kvalue.data; + int ret = 0; + + mutex_lock(&prog->aux->st_ops_assoc_mutex); + + if (prog->aux->st_ops_assoc && prog->aux->st_ops_assoc != kdata) { + ret = -EBUSY; + goto out; + } + + prog->aux->st_ops_assoc = kdata; +out: + mutex_unlock(&prog->aux->st_ops_assoc_mutex); + return ret; +} + +void bpf_struct_ops_disassoc_prog(struct bpf_prog *prog) +{ + mutex_lock(&prog->aux->st_ops_assoc_mutex); + prog->aux->st_ops_assoc = NULL; + mutex_unlock(&prog->aux->st_ops_assoc_mutex); +} + void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map) { struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map; diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index d595fe512498..bf9110a82962 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -136,6 +136,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag mutex_init(&fp->aux->used_maps_mutex); mutex_init(&fp->aux->ext_mutex); mutex_init(&fp->aux->dst_mutex); + mutex_init(&fp->aux->st_ops_assoc_mutex); #ifdef CONFIG_BPF_SYSCALL bpf_prog_stream_init(fp); @@ -286,6 +287,7 @@ void __bpf_prog_free(struct bpf_prog *fp) if (fp->aux) { mutex_destroy(&fp->aux->used_maps_mutex); mutex_destroy(&fp->aux->dst_mutex); + mutex_destroy(&fp->aux->st_ops_assoc_mutex); kfree(fp->aux->poke_tab); kfree(fp->aux); } @@ -2875,6 +2877,10 @@ static void bpf_prog_free_deferred(struct work_struct *work) #endif bpf_free_used_maps(aux); bpf_free_used_btfs(aux); + if (aux->st_ops_assoc) { + bpf_struct_ops_put(aux->st_ops_assoc); + bpf_struct_ops_disassoc_prog(aux->prog); + } if 
(bpf_prog_is_dev_bound(aux)) bpf_prog_dev_bound_destroy(aux->prog); #ifdef CONFIG_PERF_EVENTS diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index a48fa86f82a7..1d7946a8208c 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -6092,6 +6092,41 @@ static int prog_stream_read(union bpf_attr *attr) return ret; } +#define BPF_STRUCT_OPS_ASSOCIATE_PROG_LAST_FIELD struct_ops_assoc_prog.prog_fd + +static int struct_ops_assoc_prog(union bpf_attr *attr) +{ + struct bpf_prog *prog; + struct bpf_map *map; + int ret; + + if (CHECK_ATTR(BPF_STRUCT_OPS_ASSOCIATE_PROG)) + return -EINVAL; + + prog = bpf_prog_get(attr->struct_ops_assoc_prog.prog_fd); + if (IS_ERR(prog)) + return PTR_ERR(prog); + + map = bpf_map_get(attr->struct_ops_assoc_prog.map_fd); + if (IS_ERR(map)) { + ret = PTR_ERR(map); + goto out; + } + + if (map->map_type != BPF_MAP_TYPE_STRUCT_OPS || + prog->type == BPF_PROG_TYPE_STRUCT_OPS) { + ret = -EINVAL; + goto out; + } + + ret = bpf_struct_ops_assoc_prog(map, prog); +out: + if (ret && !IS_ERR(map)) + bpf_map_put(map); + bpf_prog_put(prog); + return ret; +} + static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size) { union bpf_attr attr; @@ -6231,6 +6266,9 @@ static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size) case BPF_PROG_STREAM_READ_BY_FD: err = prog_stream_read(&attr); break; + case BPF_STRUCT_OPS_ASSOCIATE_PROG: + err = struct_ops_assoc_prog(&attr); + break; default: err = -EINVAL; break; diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index ae83d8649ef1..1e76fa22dd61 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -918,6 +918,16 @@ union bpf_iter_link_info { * Number of bytes read from the stream on success, or -1 if an * error occurred (in which case, *errno* is set appropriately). * + * BPF_STRUCT_OPS_ASSOCIATE_PROG + * Description + * Associate a BPF program with a struct_ops map. 
The struct_ops + * map is identified by *map_fd* and the BPF program is + * identified by *prog_fd*. + * + * Return + * 0 on success or -1 if an error occurred (in which case, + * *errno* is set appropriately). + * * NOTES * eBPF objects (maps and programs) can be shared between processes. * @@ -974,6 +984,7 @@ enum bpf_cmd { BPF_PROG_BIND_MAP, BPF_TOKEN_CREATE, BPF_PROG_STREAM_READ_BY_FD, + BPF_STRUCT_OPS_ASSOCIATE_PROG, __MAX_BPF_CMD, }; @@ -1890,6 +1901,11 @@ union bpf_attr { __u32 prog_fd; } prog_stream_read; + struct { + __u32 map_fd; + __u32 prog_fd; + } struct_ops_assoc_prog; + } __attribute__((aligned(8))); /* The description below is an attempt at providing documentation to eBPF From 259775a38cecee5f5ce81e6460f2206f6ea7441e Mon Sep 17 00:00:00 2001 From: Amery Hung Date: Tue, 30 Sep 2025 16:41:52 -0700 Subject: [PATCH 3/4] libbpf: Add bpf_struct_ops_associate_prog() API Add low-level wrapper API for BPF_STRUCT_OPS_ASSOCIATE_PROG command in bpf() syscall. Signed-off-by: Amery Hung --- tools/lib/bpf/bpf.c | 18 ++++++++++++++++++ tools/lib/bpf/bpf.h | 19 +++++++++++++++++++ tools/lib/bpf/libbpf.map | 1 + 3 files changed, 38 insertions(+) diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 339b19797237..230fc2fa98f9 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -1397,3 +1397,21 @@ int bpf_prog_stream_read(int prog_fd, __u32 stream_id, void *buf, __u32 buf_len, err = sys_bpf(BPF_PROG_STREAM_READ_BY_FD, &attr, attr_sz); return libbpf_err_errno(err); } + +int bpf_struct_ops_associate_prog(int map_fd, int prog_fd, + struct bpf_struct_ops_associate_prog_opts *opts) +{ + const size_t attr_sz = offsetofend(union bpf_attr, struct_ops_assoc_prog); + union bpf_attr attr; + int err; + + if (!OPTS_VALID(opts, bpf_struct_ops_associate_prog_opts)) + return libbpf_err(-EINVAL); + + memset(&attr, 0, attr_sz); + attr.struct_ops_assoc_prog.map_fd = map_fd; + attr.struct_ops_assoc_prog.prog_fd = prog_fd; + + err = sys_bpf(BPF_STRUCT_OPS_ASSOCIATE_PROG, 
&attr, attr_sz); + return libbpf_err_errno(err); +} diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index e983a3e40d61..99fe189ca7c6 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -733,6 +733,25 @@ struct bpf_prog_stream_read_opts { LIBBPF_API int bpf_prog_stream_read(int prog_fd, __u32 stream_id, void *buf, __u32 buf_len, struct bpf_prog_stream_read_opts *opts); +struct bpf_struct_ops_associate_prog_opts { + size_t sz; + size_t :0; +}; +#define bpf_struct_ops_associate_prog_opts__last_field sz +/** + * @brief **bpf_struct_ops_associate_prog** associates a BPF program with a + * struct_ops map. + * + * @param map_fd FD for the struct_ops map to be associated with a BPF program + * @param prog_fd FD for the BPF program + * @param opts optional options, can be NULL + * + * @return 0 on success; negative error code, otherwise (errno is also set to + * the error code) + */ +LIBBPF_API int bpf_struct_ops_associate_prog(int map_fd, int prog_fd, + struct bpf_struct_ops_associate_prog_opts *opts); + #ifdef __cplusplus } /* extern "C" */ #endif diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 8ed8749907d4..3a156a663210 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -451,4 +451,5 @@ LIBBPF_1.7.0 { global: bpf_map__set_exclusive_program; bpf_map__exclusive_program; + bpf_struct_ops_associate_prog; } LIBBPF_1.6.0; From e8e664ccc3ae69e138fee5d530db236481dd8934 Mon Sep 17 00:00:00 2001 From: Amery Hung Date: Tue, 30 Sep 2025 16:42:43 -0700 Subject: [PATCH 4/4] selftests/bpf: Test BPF_STRUCT_OPS_ASSOCIATE_PROG command Test BPF_STRUCT_OPS_ASSOCIATE_PROG command that associates a BPF program with a struct_ops. The test follows the same logic in commit ba7000f1c360 ("selftests/bpf: Test multi_st_ops and calling kfuncs from different programs"), but instead of using map id to identify a specific struct_ops this test uses the new BPF command to associate a struct_ops with a program. 
The test consists of two set of almost identical struct_ops maps and BPF programs associated with the map. Their only difference is a unique value returned by bpf_testmod_multi_st_ops::test_1(). The test first loads the programs and associates them with struct_ops maps. Then, the test exercises the BPF programs. They will in turn call kfunc bpf_kfunc_multi_st_ops_test_1_prog_arg() to trigger test_1() of the associated struct_ops map, and then check if the right unique value is returned. Signed-off-by: Amery Hung --- .../bpf/prog_tests/test_struct_ops_assoc.c | 76 +++++++++++++ .../selftests/bpf/progs/struct_ops_assoc.c | 105 ++++++++++++++++++ .../selftests/bpf/test_kmods/bpf_testmod.c | 17 +++ .../bpf/test_kmods/bpf_testmod_kfunc.h | 1 + 4 files changed, 199 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/test_struct_ops_assoc.c create mode 100644 tools/testing/selftests/bpf/progs/struct_ops_assoc.c diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_assoc.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_assoc.c new file mode 100644 index 000000000000..da8fab0fe5cf --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_assoc.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include "struct_ops_assoc.skel.h" + +static void test_st_ops_assoc(void) +{ + int sys_enter_prog_a_fd, sys_enter_prog_b_fd; + int syscall_prog_a_fd, syscall_prog_b_fd; + struct struct_ops_assoc *skel = NULL; + int err, pid, map_a_fd, map_b_fd; + + skel = struct_ops_assoc__open_and_load(); + if (!ASSERT_OK_PTR(skel, "struct_ops_assoc__open")) + goto out; + + sys_enter_prog_a_fd = bpf_program__fd(skel->progs.sys_enter_prog_a); + sys_enter_prog_b_fd = bpf_program__fd(skel->progs.sys_enter_prog_b); + syscall_prog_a_fd = bpf_program__fd(skel->progs.syscall_prog_a); + syscall_prog_b_fd = bpf_program__fd(skel->progs.syscall_prog_b); + map_a_fd = bpf_map__fd(skel->maps.st_ops_map_a); + map_b_fd = 
bpf_map__fd(skel->maps.st_ops_map_b); + + err = bpf_struct_ops_associate_prog(map_a_fd, syscall_prog_a_fd, NULL); + if (!ASSERT_OK(err, "bpf_struct_ops_associate_prog")) + goto out; + + err = bpf_struct_ops_associate_prog(map_a_fd, sys_enter_prog_a_fd, NULL); + if (!ASSERT_OK(err, "bpf_struct_ops_associate_prog")) + goto out; + + err = bpf_struct_ops_associate_prog(map_b_fd, syscall_prog_b_fd, NULL); + if (!ASSERT_OK(err, "bpf_struct_ops_associate_prog")) + goto out; + + err = bpf_struct_ops_associate_prog(map_b_fd, sys_enter_prog_b_fd, NULL); + if (!ASSERT_OK(err, "bpf_struct_ops_associate_prog")) + goto out; + + /* sys_enter_prog_a already associated with map_a */ + err = bpf_struct_ops_associate_prog(map_b_fd, sys_enter_prog_a_fd, NULL); + if (!ASSERT_ERR(err, "bpf_struct_ops_associate_prog")) + goto out; + + err = struct_ops_assoc__attach(skel); + if (!ASSERT_OK(err, "struct_ops_assoc__attach")) + goto out; + + /* run tracing prog that calls .test_1 and checks return */ + pid = getpid(); + skel->bss->test_pid = pid; + sys_gettid(); + skel->bss->test_pid = 0; + + ASSERT_EQ(skel->bss->test_err_a, 0, "skel->bss->test_err_a"); + ASSERT_EQ(skel->bss->test_err_b, 0, "skel->bss->test_err_b"); + + /* run syscall_prog that calls .test_1 and checks return */ + err = bpf_prog_test_run_opts(syscall_prog_a_fd, NULL); + ASSERT_OK(err, "bpf_prog_test_run_opts"); + + err = bpf_prog_test_run_opts(syscall_prog_b_fd, NULL); + ASSERT_OK(err, "bpf_prog_test_run_opts"); + + ASSERT_EQ(skel->bss->test_err_a, 0, "skel->bss->test_err"); + ASSERT_EQ(skel->bss->test_err_b, 0, "skel->bss->test_err"); + +out: + struct_ops_assoc__destroy(skel); +} + +void test_struct_ops_assoc(void) +{ + if (test__start_subtest("st_ops_assoc")) + test_st_ops_assoc(); +} diff --git a/tools/testing/selftests/bpf/progs/struct_ops_assoc.c b/tools/testing/selftests/bpf/progs/struct_ops_assoc.c new file mode 100644 index 000000000000..fe47287a49f0 --- /dev/null +++ 
b/tools/testing/selftests/bpf/progs/struct_ops_assoc.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include "bpf_misc.h" +#include "../test_kmods/bpf_testmod.h" +#include "../test_kmods/bpf_testmod_kfunc.h" + +char _license[] SEC("license") = "GPL"; + +int test_pid; + +/* Programs associated with st_ops_map_a */ + +#define MAP_A_MAGIC 1234 +int test_err_a; + +SEC("struct_ops") +int BPF_PROG(test_1_a, struct st_ops_args *args) +{ + return MAP_A_MAGIC; +} + +SEC("tp_btf/sys_enter") +int BPF_PROG(sys_enter_prog_a, struct pt_regs *regs, long id) +{ + struct st_ops_args args = {}; + struct task_struct *task; + int ret; + + task = bpf_get_current_task_btf(); + if (!test_pid || task->pid != test_pid) + return 0; + + ret = bpf_kfunc_multi_st_ops_test_1_prog_arg(&args, NULL); + if (ret != MAP_A_MAGIC) + test_err_a++; + + return 0; +} + +SEC("syscall") +int syscall_prog_a(void *ctx) +{ + struct st_ops_args args = {}; + int ret; + + ret = bpf_kfunc_multi_st_ops_test_1_prog_arg(&args, NULL); + if (ret != MAP_A_MAGIC) + test_err_a++; + + return 0; +} + +SEC(".struct_ops.link") +struct bpf_testmod_multi_st_ops st_ops_map_a = { + .test_1 = (void *)test_1_a, +}; + +/* Programs associated with st_ops_map_b */ + +#define MAP_B_MAGIC 5678 +int test_err_b; + +SEC("struct_ops") +int BPF_PROG(test_1_b, struct st_ops_args *args) +{ + return MAP_B_MAGIC; +} + +SEC("tp_btf/sys_enter") +int BPF_PROG(sys_enter_prog_b, struct pt_regs *regs, long id) +{ + struct st_ops_args args = {}; + struct task_struct *task; + int ret; + + task = bpf_get_current_task_btf(); + if (!test_pid || task->pid != test_pid) + return 0; + + ret = bpf_kfunc_multi_st_ops_test_1_prog_arg(&args, NULL); + if (ret != MAP_B_MAGIC) + test_err_b++; + + return 0; +} + +SEC("syscall") +int syscall_prog_b(void *ctx) +{ + struct st_ops_args args = {}; + int ret; + + ret = bpf_kfunc_multi_st_ops_test_1_prog_arg(&args, NULL); + if (ret != MAP_B_MAGIC) + test_err_b++; + + return 0; +} + 
+SEC(".struct_ops.link") +struct bpf_testmod_multi_st_ops st_ops_map_b = { + .test_1 = (void *)test_1_b, +}; diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c index 6df6475f5dbc..2e83a041cbe0 100644 --- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c +++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c @@ -1101,6 +1101,7 @@ __bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args) } __bpf_kfunc int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id); +__bpf_kfunc int bpf_kfunc_multi_st_ops_test_1_prog_arg(struct st_ops_args *args, void *aux_prog); BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids) BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc) @@ -1143,6 +1144,7 @@ BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABL BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE) BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10, KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, bpf_kfunc_multi_st_ops_test_1, KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_kfunc_multi_st_ops_test_1_prog_arg, KF_TRUSTED_ARGS) BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids) static int bpf_testmod_ops_init(struct btf *btf) @@ -1604,6 +1606,7 @@ static struct bpf_testmod_multi_st_ops *multi_st_ops_find_nolock(u32 id) return NULL; } +/* Call test_1() of the struct_ops map identified by the id */ int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id) { struct bpf_testmod_multi_st_ops *st_ops; @@ -1619,6 +1622,20 @@ int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id) return ret; } +/* Call test_1() of the associated struct_ops map */ +int bpf_kfunc_multi_st_ops_test_1_prog_arg(struct st_ops_args *args, void *aux__prog) +{ + struct bpf_prog_aux *prog_aux = (struct bpf_prog_aux *)aux__prog; + struct bpf_testmod_multi_st_ops *st_ops; + int ret = -1; + + st_ops = (struct bpf_testmod_multi_st_ops *)prog_aux->st_ops_assoc; + if (st_ops) + ret = 
st_ops->test_1(args); + + return ret; +} + static int multi_st_ops_reg(void *kdata, struct bpf_link *link) { struct bpf_testmod_multi_st_ops *st_ops = diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h index 4df6fa6a92cb..d40f4cddbd1e 100644 --- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h +++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h @@ -162,5 +162,6 @@ struct task_struct *bpf_kfunc_ret_rcu_test(void) __ksym; int *bpf_kfunc_ret_rcu_test_nostruct(int rdonly_buf_size) __ksym; int bpf_kfunc_multi_st_ops_test_1(struct st_ops_args *args, u32 id) __ksym; +int bpf_kfunc_multi_st_ops_test_1_prog_arg(struct st_ops_args *args, void *aux__prog) __ksym; #endif /* _BPF_TESTMOD_KFUNC_H */