// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <test_progs.h>
#include <bpf/btf.h>

#include "test_log_buf.skel.h"

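/* Validate that prog_fd matches the expected load outcome (negative error vs.
 * valid fd) and close the fd if one was returned.
 */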
static bool check_prog_load(int prog_fd, bool expect_err, const char *tag)
{
	if (expect_err) {
		if (!ASSERT_LT(prog_fd, 0, tag)) {
			close(prog_fd);
			return false;
		}
	} else /* !expect_err */ {
		if (!ASSERT_GT(prog_fd, 0, tag))
			return false;
	}
	if (prog_fd >= 0)
		close(prog_fd);
	return true;
}

static struct {
	/* strategically placed before others to avoid accidental modification by kernel */
	char filler[1024];
	char buf[1024];
	/* strategically placed after buf[] to catch more accidental corruptions */
	char reference[1024];
} logs;
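/* instructions of the skeleton's good_prog, reused for manual bpf_prog_load() calls below */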
static const struct bpf_insn *insns;
static size_t insn_cnt;

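/* Load insns as a raw tracepoint program and validate the result against
 * expect_load_error; check_prog_load() closes any valid fd, so callers only
 * inspect the returned value.
 */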
static int load_prog(struct bpf_prog_load_opts *opts, bool expect_load_error)
{
	int prog_fd;

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "log_prog",
				"GPL", insns, insn_cnt, opts);
	check_prog_load(prog_fd, expect_load_error, "prog_load");

	return prog_fd;
}

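/* Exercise fixed and rolling verifier log modes for the named skeleton program
 * at the given log_level: truncation behavior, untouched-buffer checks, and
 * log_true_size reporting.
 */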
static void verif_log_subtest(const char *name, bool expect_load_error, int log_level)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts);
	char *exp_log, prog_name[16], op_name[32];
	struct test_log_buf *skel;
	struct bpf_program *prog;
	size_t fixed_log_sz;
	__u32 log_true_sz_fixed, log_true_sz_rolling;
	int i, mode, err, prog_fd, res;

	skel = test_log_buf__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	bpf_object__for_each_program(prog, skel->obj) {
		if (strcmp(bpf_program__name(prog), name) == 0)
			bpf_program__set_autoload(prog, true);
		else
			bpf_program__set_autoload(prog, false);
	}

	err = test_log_buf__load(skel);
	if (!expect_load_error && !ASSERT_OK(err, "unexpected_load_failure"))
		goto cleanup;
	if (expect_load_error && !ASSERT_ERR(err, "unexpected_load_success"))
		goto cleanup;

	insns = bpf_program__insns(skel->progs.good_prog);
	insn_cnt = bpf_program__insn_cnt(skel->progs.good_prog);

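	/* capture the full verifier log in fixed mode as the reference that all
	 * truncated fixed/rolling variants are compared against
	 */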
	opts.log_buf = logs.reference;
	opts.log_size = sizeof(logs.reference);
	opts.log_level = log_level | 8 /* BPF_LOG_FIXED */;
	load_prog(&opts, expect_load_error);

	fixed_log_sz = strlen(logs.reference) + 1;
	if (!ASSERT_GT(fixed_log_sz, 50, "fixed_log_sz"))
		goto cleanup;
	memset(logs.reference + fixed_log_sz, 0, sizeof(logs.reference) - fixed_log_sz);

	/* Validate that BPF_LOG_FIXED works as the verifier log used to work:
	 * we get -ENOSPC and the beginning of the full verifier log. This only
	 * works for log_level 2, or log_level 1 with a failing program. For
	 * log_level 2 the log is never reset. For log_level 1 with a failing
	 * program we never reach the verification stats output. With log_level 1
	 * and a successful program the final result is just the verifier stats,
	 * but if the provided log buf is too short, the kernel NULLs out
	 * log->ubuf and stops emitting further log, so we would never see
	 * predictable verifier stats.
	 * Long story short, we do the following -ENOSPC test only for
	 * predictable combinations.
	 */
	if (log_level >= 2 || expect_load_error) {
		opts.log_buf = logs.buf;
		opts.log_level = log_level | 8; /* fixed-length log */
		opts.log_size = 25;

		prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "log_fixed25",
					"GPL", insns, insn_cnt, &opts);
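		/* libbpf returns -errno directly, so a truncated fixed-mode log is
		 * reported as -ENOSPC in the return value itself
		 */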
		if (!ASSERT_EQ(prog_fd, -ENOSPC, "unexpected_log_fixed_prog_load_result")) {
			if (prog_fd >= 0)
				close(prog_fd);
			goto cleanup;
		}
		if (!ASSERT_EQ(strlen(logs.buf), 24, "log_fixed_25"))
			goto cleanup;
		if (!ASSERT_STRNEQ(logs.buf, logs.reference, 24, "log_fixed_contents_25"))
			goto cleanup;
	}

	/* validate rolling verifier log logic: try all variations of log buf
	 * length to force various truncation scenarios
	 */
	opts.log_buf = logs.buf;

	/* rolling mode, then fixed mode */
	for (mode = 1; mode >= 0; mode--) {
		/* prefill logs.buf with 'A's to detect any write beyond allowed length */
		memset(logs.filler, 'A', sizeof(logs.filler));
		logs.filler[sizeof(logs.filler) - 1] = '\0';
		memset(logs.buf, 'A', sizeof(logs.buf));
		logs.buf[sizeof(logs.buf) - 1] = '\0';

		for (i = 1; i < fixed_log_sz; i++) {
			opts.log_size = i;
			opts.log_level = log_level | (mode ? 0 : 8 /* BPF_LOG_FIXED */);

			snprintf(prog_name, sizeof(prog_name),
				 "log_%s_%d", mode ? "roll" : "fixed", i);
			prog_fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, prog_name,
						"GPL", insns, insn_cnt, &opts);

			snprintf(op_name, sizeof(op_name),
				 "log_%s_prog_load_%d", mode ? "roll" : "fixed", i);
			if (!ASSERT_EQ(prog_fd, -ENOSPC, op_name)) {
				if (prog_fd >= 0)
					close(prog_fd);
				goto cleanup;
			}

			snprintf(op_name, sizeof(op_name),
				 "log_%s_strlen_%d", mode ? "roll" : "fixed", i);
			ASSERT_EQ(strlen(logs.buf), i - 1, op_name);

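			/* rolling mode keeps the tail of the full log, fixed mode keeps its head */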
			if (mode)
				exp_log = logs.reference + fixed_log_sz - i;
			else
				exp_log = logs.reference;

			snprintf(op_name, sizeof(op_name),
				 "log_%s_contents_%d", mode ? "roll" : "fixed", i);
			if (!ASSERT_STRNEQ(logs.buf, exp_log, i - 1, op_name)) {
				printf("CMP:%d\nS1:'%s'\nS2:'%s'\n",
					strncmp(logs.buf, exp_log, i - 1),
					logs.buf, exp_log);
				goto cleanup;
			}

			/* check that unused portions of logs.buf are not overwritten */
			snprintf(op_name, sizeof(op_name),
				 "log_%s_unused_%d", mode ? "roll" : "fixed", i);
			if (!ASSERT_STREQ(logs.buf + i, logs.filler + i, op_name)) {
				printf("CMP:%d\nS1:'%s'\nS2:'%s'\n",
					strcmp(logs.buf + i, logs.filler + i),
					logs.buf + i, logs.filler + i);
				goto cleanup;
			}
		}
	}

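	/* log_true_size reports how big a buffer the full log would need; it should
	 * not depend on fixed vs. rolling mode or on whether a buffer was supplied
	 */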
	/* (FIXED) get actual log size */
	opts.log_buf = logs.buf;
	opts.log_level = log_level | 8; /* BPF_LOG_FIXED */
	opts.log_size = sizeof(logs.buf);
	opts.log_true_size = 0;
	res = load_prog(&opts, expect_load_error);
	ASSERT_NEQ(res, -ENOSPC, "prog_load_res_fixed");

	log_true_sz_fixed = opts.log_true_size;
	ASSERT_GT(log_true_sz_fixed, 0, "log_true_sz_fixed");

	/* (FIXED, NULL) get actual log size */
	opts.log_buf = NULL;
	opts.log_level = log_level | 8; /* BPF_LOG_FIXED */
	opts.log_size = 0;
	opts.log_true_size = 0;
	res = load_prog(&opts, expect_load_error);
	ASSERT_NEQ(res, -ENOSPC, "prog_load_res_fixed_null");
	ASSERT_EQ(opts.log_true_size, log_true_sz_fixed, "log_sz_fixed_null_eq");

	/* (ROLLING) get actual log size */
	opts.log_buf = logs.buf;
	opts.log_level = log_level;
	opts.log_size = sizeof(logs.buf);
	opts.log_true_size = 0;
	res = load_prog(&opts, expect_load_error);
	ASSERT_NEQ(res, -ENOSPC, "prog_load_res_rolling");

	log_true_sz_rolling = opts.log_true_size;
	ASSERT_EQ(log_true_sz_rolling, log_true_sz_fixed, "log_true_sz_eq");

	/* (ROLLING, NULL) get actual log size */
	opts.log_buf = NULL;
	opts.log_level = log_level;
	opts.log_size = 0;
	opts.log_true_size = 0;
	res = load_prog(&opts, expect_load_error);
	ASSERT_NEQ(res, -ENOSPC, "prog_load_res_rolling_null");
	ASSERT_EQ(opts.log_true_size, log_true_sz_rolling, "log_true_sz_null_eq");

	/* (FIXED) expect -ENOSPC for one byte short log */
	opts.log_buf = logs.buf;
	opts.log_level = log_level | 8; /* BPF_LOG_FIXED */
	opts.log_size = log_true_sz_fixed - 1;
	opts.log_true_size = 0;
	res = load_prog(&opts, true /* should fail */);
	ASSERT_EQ(res, -ENOSPC, "prog_load_res_too_short_fixed");

	/* (FIXED) expect *not* -ENOSPC with exact log_true_size buffer */
	opts.log_buf = logs.buf;
	opts.log_level = log_level | 8; /* BPF_LOG_FIXED */
	opts.log_size = log_true_sz_fixed;
	opts.log_true_size = 0;
	res = load_prog(&opts, expect_load_error);
	ASSERT_NEQ(res, -ENOSPC, "prog_load_res_just_right_fixed");

	/* (ROLLING) expect -ENOSPC for one byte short log */
	opts.log_buf = logs.buf;
	opts.log_level = log_level;
	opts.log_size = log_true_sz_rolling - 1;
	res = load_prog(&opts, true /* should fail */);
	ASSERT_EQ(res, -ENOSPC, "prog_load_res_too_short_rolling");

	/* (ROLLING) expect *not* -ENOSPC with exact log_true_size buffer */
	opts.log_buf = logs.buf;
	opts.log_level = log_level;
	opts.log_size = log_true_sz_rolling;
	opts.log_true_size = 0;
	res = load_prog(&opts, expect_load_error);
	ASSERT_NEQ(res, -ENOSPC, "prog_load_res_just_right_rolling");

cleanup:
	test_log_buf__destroy(skel);
}

static const void *btf_data;
static u32 btf_data_sz;

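/* Load the prepared raw BTF blob with the given opts, close a valid fd, and
 * validate the outcome against expect_err.
 */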
static int load_btf(struct bpf_btf_load_opts *opts, bool expect_err)
{
	int fd;

	fd = bpf_btf_load(btf_data, btf_data_sz, opts);
	if (fd >= 0)
		close(fd);
	if (expect_err)
		ASSERT_LT(fd, 0, "btf_load_failure");
	else /* !expect_err */
		ASSERT_GT(fd, 0, "btf_load_success");
	return fd;
}

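/* Mirror of verif_log_subtest() for BPF_BTF_LOAD: build a minimal BTF,
 * optionally corrupt it, and validate fixed/rolling log and log_true_size
 * behavior.
 */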
static void verif_btf_log_subtest(bool bad_btf)
{
	LIBBPF_OPTS(bpf_btf_load_opts, opts);
	struct btf *btf;
	struct btf_type *t;
	char *exp_log, op_name[32];
	size_t fixed_log_sz;
	__u32 log_true_sz_fixed, log_true_sz_rolling;
	int i, res;

	/* prepare simple BTF contents */
	btf = btf__new_empty();
	if (!ASSERT_OK_PTR(btf, "btf_new_empty"))
		return;
	res = btf__add_int(btf, "whatever", 4, 0);
	if (!ASSERT_GT(res, 0, "btf_add_int_id"))
		goto cleanup;
	if (bad_btf) {
		/* btf__add_int() doesn't allow an invalid size, so force-cast the
		 * btf_type pointer and manually override the size to an invalid
		 * value of 3 to simulate failure
		 */
		t = (void *)btf__type_by_id(btf, res);
		if (!ASSERT_OK_PTR(t, "int_btf_type"))
			goto cleanup;
		t->size = 3;
	}

	btf_data = btf__raw_data(btf, &btf_data_sz);
	if (!ASSERT_OK_PTR(btf_data, "btf_data"))
		goto cleanup;

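	/* sanity-check the load outcome without requesting any log */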
	load_btf(&opts, bad_btf);

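	/* capture the full BTF verifier log in fixed mode as a reference */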
	opts.log_buf = logs.reference;
	opts.log_size = sizeof(logs.reference);
	opts.log_level = 1 | 8 /* BPF_LOG_FIXED */;
	load_btf(&opts, bad_btf);

	fixed_log_sz = strlen(logs.reference) + 1;
	if (!ASSERT_GT(fixed_log_sz, 50, "fixed_log_sz"))
		goto cleanup;
	memset(logs.reference + fixed_log_sz, 0, sizeof(logs.reference) - fixed_log_sz);

	/* validate that BPF_LOG_FIXED truncation works as the verifier log used to */
	opts.log_buf = logs.buf;
	opts.log_level = 1 | 8; /* fixed-length log */
	opts.log_size = 25;
	res = load_btf(&opts, true);
	ASSERT_EQ(res, -ENOSPC, "half_log_fd");
	ASSERT_EQ(strlen(logs.buf), 24, "log_fixed_25");
	ASSERT_STRNEQ(logs.buf, logs.reference, 24, "log_fixed_contents_25");

	/* validate rolling verifier log logic: try all variations of log buf
	 * length to force various truncation scenarios
	 */
	opts.log_buf = logs.buf;
	opts.log_level = 1; /* rolling log */

	/* prefill logs.buf with 'A's to detect any write beyond allowed length */
	memset(logs.filler, 'A', sizeof(logs.filler));
	logs.filler[sizeof(logs.filler) - 1] = '\0';
	memset(logs.buf, 'A', sizeof(logs.buf));
	logs.buf[sizeof(logs.buf) - 1] = '\0';

	for (i = 1; i < fixed_log_sz; i++) {
		opts.log_size = i;

		snprintf(op_name, sizeof(op_name), "log_roll_btf_load_%d", i);
		res = load_btf(&opts, true);
		if (!ASSERT_EQ(res, -ENOSPC, op_name))
			goto cleanup;

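		/* rolling mode retains only the last i - 1 bytes of the reference log */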
		exp_log = logs.reference + fixed_log_sz - i;
		snprintf(op_name, sizeof(op_name), "log_roll_contents_%d", i);
		if (!ASSERT_STREQ(logs.buf, exp_log, op_name)) {
			printf("CMP:%d\nS1:'%s'\nS2:'%s'\n",
				strcmp(logs.buf, exp_log),
				logs.buf, exp_log);
			goto cleanup;
		}

		/* check that unused portions of logs.buf are not overwritten */
		snprintf(op_name, sizeof(op_name), "log_roll_unused_tail_%d", i);
		if (!ASSERT_STREQ(logs.buf + i, logs.filler + i, op_name)) {
			printf("CMP:%d\nS1:'%s'\nS2:'%s'\n",
				strcmp(logs.buf + i, logs.filler + i),
				logs.buf + i, logs.filler + i);
			goto cleanup;
		}
	}

	/* (FIXED) get actual log size */
	opts.log_buf = logs.buf;
	opts.log_level = 1 | 8; /* BPF_LOG_FIXED */
	opts.log_size = sizeof(logs.buf);
	opts.log_true_size = 0;
	res = load_btf(&opts, bad_btf);
	ASSERT_NEQ(res, -ENOSPC, "btf_load_res_fixed");

	log_true_sz_fixed = opts.log_true_size;
	ASSERT_GT(log_true_sz_fixed, 0, "log_true_sz_fixed");

	/* (FIXED, NULL) get actual log size */
	opts.log_buf = NULL;
	opts.log_level = 1 | 8; /* BPF_LOG_FIXED */
	opts.log_size = 0;
	opts.log_true_size = 0;
	res = load_btf(&opts, bad_btf);
	ASSERT_NEQ(res, -ENOSPC, "btf_load_res_fixed_null");
	ASSERT_EQ(opts.log_true_size, log_true_sz_fixed, "log_sz_fixed_null_eq");

	/* (ROLLING) get actual log size */
	opts.log_buf = logs.buf;
	opts.log_level = 1;
	opts.log_size = sizeof(logs.buf);
	opts.log_true_size = 0;
	res = load_btf(&opts, bad_btf);
	ASSERT_NEQ(res, -ENOSPC, "btf_load_res_rolling");

	log_true_sz_rolling = opts.log_true_size;
	ASSERT_EQ(log_true_sz_rolling, log_true_sz_fixed, "log_true_sz_eq");

	/* (ROLLING, NULL) get actual log size */
	opts.log_buf = NULL;
	opts.log_level = 1;
	opts.log_size = 0;
	opts.log_true_size = 0;
	res = load_btf(&opts, bad_btf);
	ASSERT_NEQ(res, -ENOSPC, "btf_load_res_rolling_null");
	ASSERT_EQ(opts.log_true_size, log_true_sz_rolling, "log_true_sz_null_eq");

	/* (FIXED) expect -ENOSPC for one byte short log */
	opts.log_buf = logs.buf;
	opts.log_level = 1 | 8; /* BPF_LOG_FIXED */
	opts.log_size = log_true_sz_fixed - 1;
	opts.log_true_size = 0;
	res = load_btf(&opts, true);
	ASSERT_EQ(res, -ENOSPC, "btf_load_res_too_short_fixed");

	/* (FIXED) expect *not* -ENOSPC with exact log_true_size buffer */
	opts.log_buf = logs.buf;
	opts.log_level = 1 | 8; /* BPF_LOG_FIXED */
	opts.log_size = log_true_sz_fixed;
	opts.log_true_size = 0;
	res = load_btf(&opts, bad_btf);
	ASSERT_NEQ(res, -ENOSPC, "btf_load_res_just_right_fixed");

	/* (ROLLING) expect -ENOSPC for one byte short log */
	opts.log_buf = logs.buf;
	opts.log_level = 1;
	opts.log_size = log_true_sz_rolling - 1;
	res = load_btf(&opts, true);
	ASSERT_EQ(res, -ENOSPC, "btf_load_res_too_short_rolling");

	/* (ROLLING) expect *not* -ENOSPC with exact log_true_size buffer */
	opts.log_buf = logs.buf;
	opts.log_level = 1;
	opts.log_size = log_true_sz_rolling;
	opts.log_true_size = 0;
	res = load_btf(&opts, bad_btf);
	ASSERT_NEQ(res, -ENOSPC, "btf_load_res_just_right_rolling");

cleanup:
	btf__free(btf);
}

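/* entry point: run each program/BTF verifier log scenario as a separate subtest */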
void test_verifier_log(void)
{
	if (test__start_subtest("good_prog-level1"))
		verif_log_subtest("good_prog", false, 1);
	if (test__start_subtest("good_prog-level2"))
		verif_log_subtest("good_prog", false, 2);
	if (test__start_subtest("bad_prog-level1"))
		verif_log_subtest("bad_prog", true, 1);
	if (test__start_subtest("bad_prog-level2"))
		verif_log_subtest("bad_prog", true, 2);
	if (test__start_subtest("bad_btf"))
		verif_btf_log_subtest(true /* bad btf */);
	if (test__start_subtest("good_btf"))
		verif_btf_log_subtest(false /* !bad btf */);
}