| author | Linus Torvalds <torvalds@linux-foundation.org> | 2025-12-03 16:54:54 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2025-12-03 16:54:54 -0800 |
| commit | 015e7b0b0e8e51f7321ec2aafc1d7fc0a8a5536f (patch) | |
| tree | 258f719e59946c733dd03198eba404e85f9d0945 /tools/testing/selftests/bpf/prog_tests/bpf_gotox.c | |
| parent | b6d993310a65b994f37e3347419d9ed398ee37a3 (diff) | |
| parent | ff34657aa72a4dab9c2fd38e1b31a506951f4b1c (diff) | |
Merge tag 'bpf-next-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Pull bpf updates from Alexei Starovoitov:
- Convert selftests/bpf/test_tc_edt and test_tc_tunnel from .sh to
test_progs runner (Alexis Lothoré)
- Convert selftests/bpf/test_xsk to test_progs runner (Bastien
Curutchet)
- Replace bpf memory allocator with kmalloc_nolock() in
bpf_local_storage (Amery Hung), and in bpf streams and range tree
(Puranjay Mohan)
- Introduce support for indirect jumps in BPF verifier and x86 JIT
  (Anton Protopopov) and arm64 JIT (Puranjay Mohan); a short
  illustrative sketch of what this enables follows this list
- Remove runqslower bpf tool (Hoyeon Lee)
- Fix corner cases in the verifier to close several syzbot reports
(Eduard Zingerman, KaFai Wan)
- Several improvements in deadlock detection in rqspinlock (Kumar
Kartikeya Dwivedi)
- Implement "jmp" mode for BPF trampoline and corresponding
DYNAMIC_FTRACE_WITH_JMP. It improves "fexit" program type performance
from 80 M/s to 136 M/s. With Steven's Ack. (Menglong Dong)
- Add ability to test non-linear skbs in BPF_PROG_TEST_RUN (Paul
Chaignon)
- Do not let BPF_PROG_TEST_RUN emit invalid GSO types to stack (Daniel
Borkmann)
- Generalize buildid reader into bpf_dynptr (Mykyta Yatsenko)
- Optimize bpf_map_update_elem() for map-in-map types (Ritesh
Oedayrajsingh Varma)
- Introduce overwrite mode for BPF ring buffer (Xu Kuohai)
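Of these, the indirect-jump support is what the new bpf_gotox selftest in the diff below exercises: with a recent compiler and the new verifier/JIT support, a dense C switch in a BPF program can be lowered to a jump table (carried in a BPF_MAP_TYPE_INSN_ARRAY map) plus a single indirect-jump ("gotox") instruction. The sketch below is illustrative only, not code from this merge; the context struct and SEC name are hypothetical, and the case values are modeled on the selftest's expected in/out pairs.

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    char _license[] SEC("license") = "GPL";

    /* Hypothetical test_run context; the real type lives in the
     * (not shown) bpf_gotox.bpf.c program.
     */
    struct simple_ctx {
    	__u64 x;
    };

    SEC("syscall")
    int one_switch(struct simple_ctx *ctx)
    {
    	/* A switch dense enough that the compiler may emit a jump
    	 * table, which the verifier and the x86/arm64 JITs can now
    	 * translate into an indirect jump through an insn array map.
    	 */
    	switch (ctx->x) {
    	case 0: return 2;
    	case 1: return 3;
    	case 2: return 4;
    	case 3: return 5;
    	case 4: return 7;
    	default: return 19;
    	}
    }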
* tag 'bpf-next-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (169 commits)
bpf: optimize bpf_map_update_elem() for map-in-map types
bpf: make kprobe_multi_link_prog_run always_inline
selftests/bpf: do not hardcode target rate in test_tc_edt BPF program
selftests/bpf: remove test_tc_edt.sh
selftests/bpf: integrate test_tc_edt into test_progs
selftests/bpf: rename test_tc_edt.bpf.c section to expose program type
selftests/bpf: Add success stats to rqspinlock stress test
rqspinlock: Precede non-head waiter queueing with AA check
rqspinlock: Disable spinning for trylock fallback
rqspinlock: Use trylock fallback when per-CPU rqnode is busy
rqspinlock: Perform AA checks immediately
rqspinlock: Enclose lock/unlock within lock entry acquisitions
bpf: Remove runqslower tool
selftests/bpf: Remove usage of lsm/file_alloc_security in selftest
bpf: Disable file_alloc_security hook
bpf: check for insn arrays in check_ptr_alignment
bpf: force BPF_F_RDONLY_PROG on insn array creation
bpf: Fix exclusive map memory leak
selftests/bpf: Make CS length configurable for rqspinlock stress test
selftests/bpf: Add lock wait time stats to rqspinlock stress test
...
Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests/bpf_gotox.c')
| -rw-r--r-- | tools/testing/selftests/bpf/prog_tests/bpf_gotox.c | 292 |
1 file changed, 292 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_gotox.c b/tools/testing/selftests/bpf/prog_tests/bpf_gotox.c
new file mode 100644
index 000000000000..d138cc7b1bda
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_gotox.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in6.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+
+#include <sys/syscall.h>
+#include <bpf/bpf.h>
+
+#include "bpf_gotox.skel.h"
+
+static void __test_run(struct bpf_program *prog, void *ctx_in, size_t ctx_size_in)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		    .ctx_in = ctx_in,
+		    .ctx_size_in = ctx_size_in,
+	);
+	int err, prog_fd;
+
+	prog_fd = bpf_program__fd(prog);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "test_run_opts err");
+}
+
+static void __subtest(struct bpf_gotox *skel, void (*check)(struct bpf_gotox *))
+{
+	if (skel->data->skip)
+		test__skip();
+	else
+		check(skel);
+}
+
+static void check_simple(struct bpf_gotox *skel,
+			 struct bpf_program *prog,
+			 __u64 ctx_in,
+			 __u64 expected)
+{
+	skel->bss->ret_user = 0;
+
+	__test_run(prog, &ctx_in, sizeof(ctx_in));
+
+	if (!ASSERT_EQ(skel->bss->ret_user, expected, "skel->bss->ret_user"))
+		return;
+}
+
+static void check_simple_fentry(struct bpf_gotox *skel,
+				struct bpf_program *prog,
+				__u64 ctx_in,
+				__u64 expected)
+{
+	skel->bss->in_user = ctx_in;
+	skel->bss->ret_user = 0;
+
+	/* trigger */
+	usleep(1);
+
+	if (!ASSERT_EQ(skel->bss->ret_user, expected, "skel->bss->ret_user"))
+		return;
+}
+
+/* validate that for two loads of the same jump table libbpf generates only one map */
+static void check_one_map_two_jumps(struct bpf_gotox *skel)
+{
+	struct bpf_prog_info prog_info;
+	struct bpf_map_info map_info;
+	__u32 len;
+	__u32 map_ids[16];
+	int prog_fd, map_fd;
+	int ret;
+	int i;
+	bool seen = false;
+
+	memset(&prog_info, 0, sizeof(prog_info));
+	prog_info.map_ids = (long)map_ids;
+	prog_info.nr_map_ids = ARRAY_SIZE(map_ids);
+	prog_fd = bpf_program__fd(skel->progs.one_map_two_jumps);
+	if (!ASSERT_GE(prog_fd, 0, "bpf_program__fd(one_map_two_jumps)"))
+		return;
+
+	len = sizeof(prog_info);
+	ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &len);
+	if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd(prog_fd)"))
+		return;
+
+	for (i = 0; i < prog_info.nr_map_ids; i++) {
+		map_fd = bpf_map_get_fd_by_id(map_ids[i]);
+		if (!ASSERT_GE(map_fd, 0, "bpf_map_get_fd_by_id"))
+			return;
+
+		len = sizeof(map_info);
+		memset(&map_info, 0, len);
+		ret = bpf_obj_get_info_by_fd(map_fd, &map_info, &len);
+		if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd(map_fd)")) {
+			close(map_fd);
+			return;
+		}
+
+		if (map_info.type == BPF_MAP_TYPE_INSN_ARRAY) {
+			if (!ASSERT_EQ(seen, false, "more than one INSN_ARRAY map")) {
+				close(map_fd);
+				return;
+			}
+			seen = true;
+		}
+		close(map_fd);
+	}
+
+	ASSERT_EQ(seen, true, "no INSN_ARRAY map");
+}
+
+static void check_one_switch(struct bpf_gotox *skel)
+{
+	__u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+	__u64 out[] = {2, 3, 4, 5, 7, 19, 19};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(in); i++)
+		check_simple(skel, skel->progs.one_switch, in[i], out[i]);
+}
+
+static void check_one_switch_non_zero_sec_off(struct bpf_gotox *skel)
+{
+	__u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+	__u64 out[] = {2, 3, 4, 5, 7, 19, 19};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(in); i++)
+		check_simple(skel, skel->progs.one_switch_non_zero_sec_off, in[i], out[i]);
+}
+
+static void check_two_switches(struct bpf_gotox *skel)
+{
+	__u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+	__u64 out[] = {103, 104, 107, 205, 115, 1019, 1019};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(in); i++)
+		check_simple(skel, skel->progs.two_switches, in[i], out[i]);
+}
+
+static void check_big_jump_table(struct bpf_gotox *skel)
+{
+	__u64 in[] = {0, 11, 27, 31, 22, 45, 99};
+	__u64 out[] = {2, 3, 4, 5, 19, 19, 19};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(in); i++)
+		check_simple(skel, skel->progs.big_jump_table, in[i], out[i]);
+}
+
+static void check_one_jump_two_maps(struct bpf_gotox *skel)
+{
+	__u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+	__u64 out[] = {12, 15, 7, 15, 12, 15, 15};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(in); i++)
+		check_simple(skel, skel->progs.one_jump_two_maps, in[i], out[i]);
+}
+
+static void check_static_global(struct bpf_gotox *skel)
+{
+	__u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+	__u64 out[] = {2, 3, 4, 5, 7, 19, 19};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(in); i++)
+		check_simple(skel, skel->progs.use_static_global1, in[i], out[i]);
+	for (i = 0; i < ARRAY_SIZE(in); i++)
+		check_simple(skel, skel->progs.use_static_global2, in[i], out[i]);
+}
+
+static void check_nonstatic_global(struct bpf_gotox *skel)
+{
+	__u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+	__u64 out[] = {2, 3, 4, 5, 7, 19, 19};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(in); i++)
+		check_simple(skel, skel->progs.use_nonstatic_global1, in[i], out[i]);
+
+	for (i = 0; i < ARRAY_SIZE(in); i++)
+		check_simple(skel, skel->progs.use_nonstatic_global2, in[i], out[i]);
+}
+
+static void check_other_sec(struct bpf_gotox *skel)
+{
+	struct bpf_link *link;
+	__u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+	__u64 out[] = {2, 3, 4, 5, 7, 19, 19};
+	int i;
+
+	link = bpf_program__attach(skel->progs.simple_test_other_sec);
+	if (!ASSERT_OK_PTR(link, "link"))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(in); i++)
+		check_simple_fentry(skel, skel->progs.simple_test_other_sec, in[i], out[i]);
+
+	bpf_link__destroy(link);
+}
+
+static void check_static_global_other_sec(struct bpf_gotox *skel)
+{
+	struct bpf_link *link;
+	__u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+	__u64 out[] = {2, 3, 4, 5, 7, 19, 19};
+	int i;
+
+	link = bpf_program__attach(skel->progs.use_static_global_other_sec);
+	if (!ASSERT_OK_PTR(link, "link"))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(in); i++)
+		check_simple_fentry(skel, skel->progs.use_static_global_other_sec, in[i], out[i]);
+
+	bpf_link__destroy(link);
+}
+
+static void check_nonstatic_global_other_sec(struct bpf_gotox *skel)
+{
+	struct bpf_link *link;
+	__u64 in[] = {0, 1, 2, 3, 4, 5, 77};
+	__u64 out[] = {2, 3, 4, 5, 7, 19, 19};
+	int i;
+
+	link = bpf_program__attach(skel->progs.use_nonstatic_global_other_sec);
+	if (!ASSERT_OK_PTR(link, "link"))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(in); i++)
+		check_simple_fentry(skel, skel->progs.use_nonstatic_global_other_sec, in[i], out[i]);
+
+	bpf_link__destroy(link);
+}
+
+void test_bpf_gotox(void)
+{
+	struct bpf_gotox *skel;
+	int ret;
+
+	skel = bpf_gotox__open();
+	if (!ASSERT_NEQ(skel, NULL, "bpf_gotox__open"))
+		return;
+
+	ret = bpf_gotox__load(skel);
+	if (!ASSERT_OK(ret, "bpf_gotox__load"))
+		return;
+
+	skel->bss->pid = getpid();
+
+	if (test__start_subtest("one-switch"))
+		__subtest(skel, check_one_switch);
+
+	if (test__start_subtest("one-switch-non-zero-sec-offset"))
+		__subtest(skel, check_one_switch_non_zero_sec_off);
+
+	if (test__start_subtest("two-switches"))
+		__subtest(skel, check_two_switches);
+
+	if (test__start_subtest("big-jump-table"))
+		__subtest(skel, check_big_jump_table);
+
+	if (test__start_subtest("static-global"))
+		__subtest(skel, check_static_global);
+
+	if (test__start_subtest("nonstatic-global"))
+		__subtest(skel, check_nonstatic_global);
+
+	if (test__start_subtest("other-sec"))
+		__subtest(skel, check_other_sec);
+
+	if (test__start_subtest("static-global-other-sec"))
+		__subtest(skel, check_static_global_other_sec);
+
+	if (test__start_subtest("nonstatic-global-other-sec"))
+		__subtest(skel, check_nonstatic_global_other_sec);
+
+	if (test__start_subtest("one-jump-two-maps"))
+		__subtest(skel, check_one_jump_two_maps);
+
+	if (test__start_subtest("one-map-two-jumps"))
+		__subtest(skel, check_one_map_two_jumps);
+
+	bpf_gotox__destroy(skel);
+}
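For reference (not part of this commit): once built under tools/testing/selftests/bpf, these tests can be selected through the test_progs runner's standard -t filter, where subtest names match the strings passed to test__start_subtest() above, e.g.:

    ./test_progs -t bpf_gotox
    ./test_progs -t bpf_gotox/one-map-two-jumps

The one-map-two-jumps subtest is the interesting one for loader behavior: it walks the program's map IDs via bpf_obj_get_info_by_fd() and asserts that exactly one BPF_MAP_TYPE_INSN_ARRAY map exists, i.e. that libbpf deduplicates two loads of the same jump table into a single map.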