1 // SPDX-License-Identifier: GPL-2.0
2 #define _GNU_SOURCE
3 #include <pthread.h>
4 #include <sched.h>
5 #include <sys/socket.h>
6 #include <test_progs.h>
7 
8 #define MAX_CNT_RAWTP	10ull
9 #define MAX_STACK_RAWTP	100
10 
11 static int duration = 0;
12 
13 struct get_stack_trace_t {
14 	int pid;
15 	int kern_stack_size;
16 	int user_stack_size;
17 	int user_stack_buildid_size;
18 	__u64 kern_stack[MAX_STACK_RAWTP];
19 	__u64 user_stack[MAX_STACK_RAWTP];
20 	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
21 };
22 
23 static void get_stack_print_output(void *ctx, int cpu, void *data, __u32 size)
24 {
25 	bool good_kern_stack = false, good_user_stack = false;
26 	const char *nonjit_func = "___bpf_prog_run";
27 	struct get_stack_trace_t *e = data;
28 	int i, num_stack;
29 	static __u64 cnt;
30 	struct ksym *ks;
31 
32 	cnt++;
33 
34 	if (size < sizeof(struct get_stack_trace_t)) {
35 		__u64 *raw_data = data;
36 		bool found = false;
37 
38 		num_stack = size / sizeof(__u64);
39 		/* If jit is enabled, we do not have a good way to
40 		 * verify the sanity of the kernel stack. So we
41 		 * just assume it is good if the stack is not empty.
42 		 * This could be improved in the future.
43 		 */
44 		if (env.jit_enabled) {
45 			found = num_stack > 0;
46 		} else {
47 			for (i = 0; i < num_stack; i++) {
48 				ks = ksym_search(raw_data[i]);
49 				if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
50 					found = true;
51 					break;
52 				}
53 			}
54 		}
55 		if (found) {
56 			good_kern_stack = true;
57 			good_user_stack = true;
58 		}
59 	} else {
60 		num_stack = e->kern_stack_size / sizeof(__u64);
61 		if (env.jit_enabled) {
62 			good_kern_stack = num_stack > 0;
63 		} else {
64 			for (i = 0; i < num_stack; i++) {
65 				ks = ksym_search(e->kern_stack[i]);
66 				if (ks && (strcmp(ks->name, nonjit_func) == 0)) {
67 					good_kern_stack = true;
68 					break;
69 				}
70 			}
71 		}
72 		if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
73 			good_user_stack = true;
74 	}
75 
76 	if (!good_kern_stack)
77 	    CHECK(!good_kern_stack, "kern_stack", "corrupted kernel stack\n");
78 	if (!good_user_stack)
79 	    CHECK(!good_user_stack, "user_stack", "corrupted user stack\n");
80 }
81 
82 void test_get_stack_raw_tp(void)
83 {
84 	const char *file = "./test_get_stack_rawtp.o";
85 	const char *file_err = "./test_get_stack_rawtp_err.o";
86 	const char *prog_name = "raw_tracepoint/sys_enter";
87 	int i, err, prog_fd, exp_cnt = MAX_CNT_RAWTP;
88 	struct perf_buffer_opts pb_opts = {};
89 	struct perf_buffer *pb = NULL;
90 	struct bpf_link *link = NULL;
91 	struct timespec tv = {0, 10};
92 	struct bpf_program *prog;
93 	struct bpf_object *obj;
94 	struct bpf_map *map;
95 	cpu_set_t cpu_set;
96 
97 	err = bpf_prog_load(file_err, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
98 	if (CHECK(err >= 0, "prog_load raw tp", "err %d errno %d\n", err, errno))
99 		return;
100 
101 	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
102 	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
103 		return;
104 
105 	prog = bpf_object__find_program_by_title(obj, prog_name);
106 	if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
107 		goto close_prog;
108 
109 	map = bpf_object__find_map_by_name(obj, "perfmap");
110 	if (CHECK(!map, "bpf_find_map", "not found\n"))
111 		goto close_prog;
112 
113 	err = load_kallsyms();
114 	if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
115 		goto close_prog;
116 
117 	CPU_ZERO(&cpu_set);
118 	CPU_SET(0, &cpu_set);
119 	err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
120 	if (CHECK(err, "set_affinity", "err %d, errno %d\n", err, errno))
121 		goto close_prog;
122 
123 	link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
124 	if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link)))
125 		goto close_prog;
126 
127 	pb_opts.sample_cb = get_stack_print_output;
128 	pb = perf_buffer__new(bpf_map__fd(map), 8, &pb_opts);
129 	if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
130 		goto close_prog;
131 
132 	/* trigger some syscall action */
133 	for (i = 0; i < MAX_CNT_RAWTP; i++)
134 		nanosleep(&tv, NULL);
135 
136 	while (exp_cnt > 0) {
137 		err = perf_buffer__poll(pb, 100);
138 		if (err < 0 && CHECK(err < 0, "pb__poll", "err %d\n", err))
139 			goto close_prog;
140 		exp_cnt -= err;
141 	}
142 
143 close_prog:
144 	if (!IS_ERR_OR_NULL(link))
145 		bpf_link__destroy(link);
146 	if (!IS_ERR_OR_NULL(pb))
147 		perf_buffer__free(pb);
148 	bpf_object__close(obj);
149 }
150