path: root/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "stacktrace_ips.skel.h"

#ifdef __x86_64__
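/*
 * Look up the stack trace captured for @key in the stackmap @fd and verify
 * that each of the @cnt expected symbol addresses (passed as variadic
 * arguments) matches the symbol that the corresponding ips[i] entry
 * resolves to via the local kallsyms copy.
 */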
static int check_stacktrace_ips(int fd, __u32 key, int cnt, ...)
{
	__u64 ips[PERF_MAX_STACK_DEPTH];
	struct ksyms *ksyms = NULL;
	int i, err = 0;
	va_list args;

	/* sorted by addr */
	ksyms = load_kallsyms_local();
	if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_local"))
		return -1;

	/* unlikely, but... */
	if (!ASSERT_LT(cnt, PERF_MAX_STACK_DEPTH, "check_max"))
		return -1;

	err = bpf_map_lookup_elem(fd, &key, ips);
	if (err)
		goto out;

	/*
	 * Compare all symbols provided via arguments with stacktrace ips,
	 * and their related symbol addresses.
	 */
	va_start(args, cnt);

	for (i = 0; i < cnt; i++) {
		unsigned long val;
		struct ksym *ksym;

		val = va_arg(args, unsigned long);
		ksym = ksym_search_local(ksyms, ips[i]);
		if (!ASSERT_OK_PTR(ksym, "ksym_search_local"))
			break;
		ASSERT_EQ(ksym->addr, val, "stack_cmp");
	}

	va_end(args);

out:
	free_kallsyms_local(ksyms);
	return err;
}

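/*
 * Attach the kprobe_multi program (or kretprobe_multi when @retprobe is
 * true) to bpf_testmod_stacktrace_test, trigger it through
 * trigger_module_test_read(), and check that the recorded stack IPs
 * resolve to the expected bpf_testmod call chain.
 */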
static void test_stacktrace_ips_kprobe_multi(bool retprobe)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
		.retprobe = retprobe
	);
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct stacktrace_ips *skel;

	skel = stacktrace_ips__open_and_load();
	if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
		return;

	if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
		test__skip();
		goto cleanup;
	}

	skel->links.kprobe_multi_test = bpf_program__attach_kprobe_multi_opts(
							skel->progs.kprobe_multi_test,
							"bpf_testmod_stacktrace_test", &opts);
	if (!ASSERT_OK_PTR(skel->links.kprobe_multi_test, "bpf_program__attach_kprobe_multi_opts"))
		goto cleanup;

	trigger_module_test_read(1);

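	/* Populate the global kallsyms cache used by ksym_get_addr() below. */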
	load_kallsyms();

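	/*
	 * The captured stack is expected to run through
	 * bpf_testmod_test_read -> _test_1 -> _test_2 -> _test_3,
	 * innermost frame first.
	 */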
	check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4,
			     ksym_get_addr("bpf_testmod_stacktrace_test_3"),
			     ksym_get_addr("bpf_testmod_stacktrace_test_2"),
			     ksym_get_addr("bpf_testmod_stacktrace_test_1"),
			     ksym_get_addr("bpf_testmod_test_read"));

cleanup:
	stacktrace_ips__destroy(skel);
}

static void __test_stacktrace_ips(void)
{
	if (test__start_subtest("kprobe_multi"))
		test_stacktrace_ips_kprobe_multi(false);
	if (test__start_subtest("kretprobe_multi"))
		test_stacktrace_ips_kprobe_multi(true);
}
#else
static void __test_stacktrace_ips(void)
{
	test__skip();
}
#endif

void test_stacktrace_ips(void)
{
	__test_stacktrace_ips();
}