// SPDX-License-Identifier: GPL-2.0-only
/*
 * Testsuite for eBPF verifier
 *
 * Copyright (c) 2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2017 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */

#include <endian.h>
#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>
#include <sched.h>
#include <limits.h>
#include <assert.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/btf.h>

#include <bpf/btf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#ifdef HAVE_GENHDR
# include "autoconf.h"
#else
# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
#  define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
# endif
#endif
#include "cap_helpers.h"
#include "bpf_rand.h"
#include "bpf_util.h"
#include "test_btf.h"
#include "../../../include/linux/filter.h"

#ifndef ENOTSUPP
#define ENOTSUPP 524
#endif

#define MAX_INSNS	BPF_MAXINSNS
#define MAX_TEST_INSNS	1000000
#define MAX_FIXUPS	8
#define MAX_NR_MAPS	22
#define MAX_TEST_RUNS	8
#define POINTER_VALUE	0xcafe4all
#define TEST_DATA_LEN	64

#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS	(1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT		(1 << 1)

/* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */
#define ADMIN_CAPS (1ULL << CAP_NET_ADMIN |	\
		    1ULL << CAP_PERFMON |	\
		    1ULL << CAP_BPF)
#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;
static int skips;
static bool verbose = false;

struct kfunc_btf_id_pair {
	const char *kfunc;
	int insn_idx;
};

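/* Test cases are defined in the verifier/*.c snippets and gathered into the
 * tests[] array further down via the FILL_ARRAY include. For illustration
 * only, a minimal (hypothetical) entry could look like this:
 *
 *	{
 *		"example: mov64 imm and exit",
 *		.insns = {
 *			BPF_MOV64_IMM(BPF_REG_0, 42),
 *			BPF_EXIT_INSN(),
 *		},
 *		.result = ACCEPT,
 *		.retval = 42,
 *	},
 */
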
struct bpf_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	struct bpf_insn *fill_insns;
	int fixup_map_hash_8b[MAX_FIXUPS];
	int fixup_map_hash_48b[MAX_FIXUPS];
	int fixup_map_hash_16b[MAX_FIXUPS];
	int fixup_map_array_48b[MAX_FIXUPS];
	int fixup_map_sockmap[MAX_FIXUPS];
	int fixup_map_sockhash[MAX_FIXUPS];
	int fixup_map_xskmap[MAX_FIXUPS];
	int fixup_map_stacktrace[MAX_FIXUPS];
	int fixup_prog1[MAX_FIXUPS];
	int fixup_prog2[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	int fixup_cgroup_storage[MAX_FIXUPS];
	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
	int fixup_map_spin_lock[MAX_FIXUPS];
	int fixup_map_array_ro[MAX_FIXUPS];
	int fixup_map_array_wo[MAX_FIXUPS];
	int fixup_map_array_small[MAX_FIXUPS];
	int fixup_sk_storage_map[MAX_FIXUPS];
	int fixup_map_event_output[MAX_FIXUPS];
	int fixup_map_reuseport_array[MAX_FIXUPS];
	int fixup_map_ringbuf[MAX_FIXUPS];
	int fixup_map_timer[MAX_FIXUPS];
	struct kfunc_btf_id_pair fixup_kfunc_btf_id[MAX_FIXUPS];
	/* Expected verifier log output for result REJECT or VERBOSE_ACCEPT.
	 * Can be a tab-separated sequence of expected strings. An empty string
	 * means no log verification.
	 */
	const char *errstr;
	const char *errstr_unpriv;
	uint32_t insn_processed;
	int prog_len;
	enum {
		UNDEF,
		ACCEPT,
		REJECT,
		VERBOSE_ACCEPT,
	} result, result_unpriv;
	enum bpf_prog_type prog_type;
	uint8_t flags;
	void (*fill_helper)(struct bpf_test *self);
	int runs;
#define bpf_testdata_struct_t					\
	struct {						\
		uint32_t retval, retval_unpriv;			\
		union {						\
			__u8 data[TEST_DATA_LEN];		\
			__u64 data64[TEST_DATA_LEN / 8];	\
		};						\
	}
	union {
		bpf_testdata_struct_t;
		bpf_testdata_struct_t retvals[MAX_TEST_RUNS];
	};
	enum bpf_attach_type expected_attach_type;
	const char *kfunc;
};

/* Note we want this to be 64 bit aligned so that the end of our array is
 * actually the end of the structure.
 */
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct other_val {
	long long foo;
	long long bar;
};

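/* Some programs are too large to spell out in a test entry. Those tests set
 * fill_helper to one of the generators below; do_test_fixup() allocates a
 * fill_insns buffer with room for MAX_TEST_INSNS instructions, and the
 * helper emits the program into it, setting prog_len (and, for some
 * helpers, retval) on the test itself.
 */
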
static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
{
	/* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
#define PUSH_CNT 51
	/* jump range is limited to 16 bit. PUSH_CNT of ld_abs needs room */
	unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, j, k = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
loop:
	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		/* jump to error label */
		insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_push);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
		i++;
	}

	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_pop);
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
		i++;
	}
	if (++k < 5)
		goto loop;

	for (; i < len - 3; i++)
		insn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef);
	insn[len - 3] = BPF_JMP_A(1);
	/* error label */
	insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0);
	insn[len - 1] = BPF_EXIT_INSN();
	self->prog_len = len;
}

static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	/* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns,
	 * but on arches like arm, ppc etc, there will be one BPF_ZEXT inserted
	 * to extend the error value of the inlined ld_abs sequence which then
	 * contains 7 insns. so, set the divisor to 7 so the testcase could
	 * work on all arches.
	 */
	unsigned int len = (1 << 15) / 7;
	int i = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	insn[i++] = BPF_LD_ABS(BPF_B, 0);
	insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
	i++;
	while (i < len - 1)
		insn[i++] = BPF_LD_ABS(BPF_B, 1);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
}

static void bpf_fill_rand_ld_dw(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	uint64_t res = 0;
	int i = 0;

	insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
	while (i < self->retval) {
		uint64_t val = bpf_semi_rand_get();
		struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };

		res ^= val;
		insn[i++] = tmp[0];
		insn[i++] = tmp[1];
		insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	}
	insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
	insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
	insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	res ^= (res >> 32);
	self->retval = (uint32_t)res;
}

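/* Note on bpf_fill_rand_ld_dw(): the program XORs every random immediate
 * into r0 and finally folds the upper half into the lower one
 * (r0 ^= r0 >> 32), so the expected retval computed above is just the low
 * 32 bits of the same fold done host-side. E.g. for res =
 * 0x1234567800000042 the program returns 0x12345678 ^ 0x00000042 =
 * 0x1234563a.
 */
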
#define MAX_JMP_SEQ 8192

/* test the sequence of 8k jumps */
static void bpf_fill_scale1(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, k = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	/* test to check that the long sequence of jumps is acceptable */
	while (k++ < MAX_JMP_SEQ) {
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_get_prandom_u32);
		insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
		insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
					-8 * (k % 64 + 1));
	}
	/* is_state_visited() doesn't allocate state for pruning for every jump.
	 * Hence multiply jmps by 4 to accommodate that heuristic.
	 */
	while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
		insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	self->retval = 42;
}

/* test the sequence of 8k jumps in the innermost function (function depth 8) */
static void bpf_fill_scale2(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, k = 0;

#define FUNC_NEST 7
	for (k = 0; k < FUNC_NEST; k++) {
		insn[i++] = BPF_CALL_REL(1);
		insn[i++] = BPF_EXIT_INSN();
	}
	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	/* test to check that the long sequence of jumps is acceptable */
	k = 0;
	while (k++ < MAX_JMP_SEQ) {
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_get_prandom_u32);
		insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
		insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
					-8 * (k % (64 - 4 * FUNC_NEST) + 1));
	}
	while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
		insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	self->retval = 42;
}

static void bpf_fill_scale(struct bpf_test *self)
{
	switch (self->retval) {
	case 1:
		return bpf_fill_scale1(self);
	case 2:
		return bpf_fill_scale2(self);
	default:
		self->prog_len = 0;
		break;
	}
}

static int bpf_fill_torturous_jumps_insn_1(struct bpf_insn *insn)
{
	unsigned int len = 259, hlen = 128;
	int i;

	insn[0] = BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32);
	for (i = 1; i <= hlen; i++) {
		insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, i, hlen);
		insn[i + hlen] = BPF_JMP_A(hlen - i);
	}
	insn[len - 2] = BPF_MOV64_IMM(BPF_REG_0, 1);
	insn[len - 1] = BPF_EXIT_INSN();

	return len;
}

static int bpf_fill_torturous_jumps_insn_2(struct bpf_insn *insn)
{
	unsigned int len = 4100, jmp_off = 2048;
	int i, j;

	insn[0] = BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32);
	for (i = 1; i <= jmp_off; i++)
		insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, i, jmp_off);
	insn[i++] = BPF_JMP_A(jmp_off);
	for (; i <= jmp_off * 2 + 1; i += 16) {
		for (j = 0; j < 16; j++)
			insn[i + j] = BPF_JMP_A(16 - j - 1);
	}

	insn[len - 2] = BPF_MOV64_IMM(BPF_REG_0, 2);
	insn[len - 1] = BPF_EXIT_INSN();

	return len;
}

static void bpf_fill_torturous_jumps(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	int i = 0;

	switch (self->retval) {
	case 1:
		self->prog_len = bpf_fill_torturous_jumps_insn_1(insn);
		return;
	case 2:
		self->prog_len = bpf_fill_torturous_jumps_insn_2(insn);
		return;
	case 3:
		/* main: two bpf-to-bpf calls (BPF_PSEUDO_CALL in src_reg,
		 * imm is the relative insn offset) into subprog 1 and 2
		 */
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 262);
		insn[i++] = BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_0, 3);
		insn[i++] = BPF_EXIT_INSN();

		/* subprog 1 */
		i += bpf_fill_torturous_jumps_insn_1(insn + i);

		/* subprog 2 */
		i += bpf_fill_torturous_jumps_insn_2(insn + i);

		self->prog_len = i;
		return;
	default:
		self->prog_len = 0;
		break;
	}
}

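/* The macros below expand to fixed instruction sequences that tests splice
 * into their .insns arrays. The instruction counts given in the comments
 * matter: fixup indices and jump offsets in tests using these macros are
 * computed against those counts.
 */
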
/* BPF_SK_LOOKUP contains 13 instructions; keep that in mind when fixing up
 * map accesses by instruction index.
 */
#define BPF_SK_LOOKUP(func)						\
	/* struct bpf_sock_tuple tuple = {} */				\
	BPF_MOV64_IMM(BPF_REG_2, 0),					\
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8),			\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40),		\
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48),		\
	/* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */		\
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),				\
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48),				\
	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)),	\
	BPF_MOV64_IMM(BPF_REG_4, 0),					\
	BPF_MOV64_IMM(BPF_REG_5, 0),					\
	BPF_EMIT_CALL(BPF_FUNC_ ## func)

/* BPF_DIRECT_PKT_R2 contains 7 instructions: it initializes the default
 * return value to 0 and does the necessary preparation for direct packet
 * access through r2. The allowed access range is 8 bytes.
 */
#define BPF_DIRECT_PKT_R2						\
	BPF_MOV64_IMM(BPF_REG_0, 0),					\
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,			\
		    offsetof(struct __sk_buff, data)),			\
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,			\
		    offsetof(struct __sk_buff, data_end)),		\
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),				\
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),				\
	BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),			\
	BPF_EXIT_INSN()

/* BPF_RAND_UEXT_R7 contains 4 instructions: it initializes R7 to a random
 * positive u32 and zero-extends it to 64 bits.
 */
#define BPF_RAND_UEXT_R7						\
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,			\
		     BPF_FUNC_get_prandom_u32),				\
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),				\
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33),				\
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)

/* BPF_RAND_SEXT_R7 contains 5 instructions: it initializes R7 to a random
 * u32 with the sign bit set and sign-extends it to 64 bits.
 */
#define BPF_RAND_SEXT_R7						\
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,			\
		     BPF_FUNC_get_prandom_u32),				\
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),				\
	BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000),			\
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32),				\
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)

static struct bpf_test tests[] = {
#define FILL_ARRAY
#include <verifier/tests.h>
#undef FILL_ARRAY
};

static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	/* Scan backwards for the last non-zero instruction. */
	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}

static bool skip_unsupported_map(enum bpf_map_type map_type)
{
	if (!libbpf_probe_bpf_map_type(map_type, NULL)) {
		printf("SKIP (unsupported map type %d)\n", map_type);
		skips++;
		return true;
	}
	return false;
}

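/* Map creation helpers. For example, the 8-byte-value hash map used for
 * fixup_map_hash_8b in do_test_fixup() below is created as:
 *
 *	create_map(BPF_MAP_TYPE_HASH, sizeof(long long), sizeof(long long), 1);
 */
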
static int __create_map(uint32_t type, uint32_t size_key,
			uint32_t size_value, uint32_t max_elem,
			uint32_t extra_flags)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts);
	int fd;

	opts.map_flags = (type == BPF_MAP_TYPE_HASH ? BPF_F_NO_PREALLOC : 0) |
			 extra_flags;
	fd = bpf_map_create(type, NULL, size_key, size_value, max_elem, &opts);
	if (fd < 0) {
		if (skip_unsupported_map(type))
			return -1;
		printf("Failed to create map '%s'!\n", strerror(errno));
	}

	return fd;
}

static int create_map(uint32_t type, uint32_t size_key,
		      uint32_t size_value, uint32_t max_elem)
{
	return __create_map(type, size_key, size_value, max_elem, 0);
}

static void update_map(int fd, int index)
{
	struct test_val value = {
		.index = (6 + 1) * sizeof(int),
		.foo[6] = 0xabcdef12,
	};

	assert(!bpf_map_update_elem(fd, &index, &value, 0));
}

static int create_prog_dummy_simple(enum bpf_prog_type prog_type, int ret)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, ret),
		BPF_EXIT_INSN(),
	};

	return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL);
}

static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd,
				  int idx, int ret)
{
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_3, idx),
		BPF_LD_MAP_FD(BPF_REG_2, mfd),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			     BPF_FUNC_tail_call),
		BPF_MOV64_IMM(BPF_REG_0, ret),
		BPF_EXIT_INSN(),
	};

	return bpf_prog_load(prog_type, NULL, "GPL", prog, ARRAY_SIZE(prog), NULL);
}

static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
			     int p1key, int p2key, int p3key)
{
	int mfd, p1fd, p2fd, p3fd;

	mfd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, NULL, sizeof(int),
			     sizeof(int), max_elem, NULL);
	if (mfd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
			return -1;
		printf("Failed to create prog array '%s'!\n", strerror(errno));
		return -1;
	}

	p1fd = create_prog_dummy_simple(prog_type, 42);
	p2fd = create_prog_dummy_loop(prog_type, mfd, p2key, 41);
	p3fd = create_prog_dummy_simple(prog_type, 24);
	if (p1fd < 0 || p2fd < 0 || p3fd < 0)
		goto err;
	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
		goto err;
	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
		goto err;
	if (bpf_map_update_elem(mfd, &p3key, &p3fd, BPF_ANY) < 0) {
err:
		close(mfd);
		mfd = -1;
	}
	close(p3fd);
	close(p2fd);
	close(p1fd);
	return mfd;
}

static int create_map_in_map(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts);
	int inner_map_fd, outer_map_fd;

	inner_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, sizeof(int),
				      sizeof(int), 1, NULL);
	if (inner_map_fd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
			return -1;
		printf("Failed to create array '%s'!\n", strerror(errno));
		return inner_map_fd;
	}

	opts.inner_map_fd = inner_map_fd;
	outer_map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
				      sizeof(int), sizeof(int), 1, &opts);
	if (outer_map_fd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
			return -1;
		printf("Failed to create array of maps '%s'!\n",
		       strerror(errno));
	}

	close(inner_map_fd);

	return outer_map_fd;
}

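/* Note for create_map_in_map() above: inner_map_fd is only needed while the
 * outer map is created; the kernel records the inner map's type and sizes
 * as the template for future values, so the fd can be closed right away.
 */
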
static int create_cgroup_storage(bool percpu)
{
	enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
					  BPF_MAP_TYPE_CGROUP_STORAGE;
	int fd;

	fd = bpf_map_create(type, NULL, sizeof(struct bpf_cgroup_storage_key),
			    TEST_DATA_LEN, 0, NULL);
	if (fd < 0) {
		if (skip_unsupported_map(type))
			return -1;
		printf("Failed to create cgroup storage '%s'!\n",
		       strerror(errno));
	}

	return fd;
}

/* struct bpf_spin_lock {
 *   int val;
 * };
 * struct val {
 *   int cnt;
 *   struct bpf_spin_lock l;
 * };
 * struct bpf_timer {
 *   __u64 :64;
 *   __u64 :64;
 * } __attribute__((aligned(8)));
 * struct timer {
 *   struct bpf_timer t;
 * };
 */
static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t";

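/* String offsets into btf_str_sec (referenced by the name_off fields in
 * btf_raw_types below): 1 = "bpf_spin_lock", 15 = "val", 19 = "cnt",
 * 23 = "l", 25 = "bpf_timer", 35 = "timer", 41 = "t".
 */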
static __u32 btf_raw_types[] = {
	/* int */
	BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
	/* struct bpf_spin_lock */			/* [2] */
	BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
	BTF_MEMBER_ENC(15, 1, 0), /* int val; */
	/* struct val */				/* [3] */
	BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
	BTF_MEMBER_ENC(19, 1, 0), /* int cnt; */
	BTF_MEMBER_ENC(23, 2, 32),/* struct bpf_spin_lock l; */
	/* struct bpf_timer */				/* [4] */
	BTF_TYPE_ENC(25, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0), 16),
	/* struct timer */				/* [5] */
	BTF_TYPE_ENC(35, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16),
	BTF_MEMBER_ENC(41, 4, 0), /* struct bpf_timer t; */
};

static int load_btf(void)
{
	struct btf_header hdr = {
		.magic = BTF_MAGIC,
		.version = BTF_VERSION,
		.hdr_len = sizeof(struct btf_header),
		.type_len = sizeof(btf_raw_types),
		.str_off = sizeof(btf_raw_types),
		.str_len = sizeof(btf_str_sec),
	};
	void *ptr, *raw_btf;
	int btf_fd;

	ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) +
			       sizeof(btf_str_sec));
	if (!raw_btf)
		return -1;

	memcpy(ptr, &hdr, sizeof(hdr));
	ptr += sizeof(hdr);
	memcpy(ptr, btf_raw_types, hdr.type_len);
	ptr += hdr.type_len;
	memcpy(ptr, btf_str_sec, hdr.str_len);
	ptr += hdr.str_len;

	btf_fd = bpf_btf_load(raw_btf, ptr - raw_btf, NULL);
	free(raw_btf);
	if (btf_fd < 0)
		return -1;
	return btf_fd;
}

static int create_map_spin_lock(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.btf_key_type_id = 1,
		.btf_value_type_id = 3,
	);
	int fd, btf_fd;

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;
	opts.btf_fd = btf_fd;
	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 8, 1, &opts);
	if (fd < 0)
		printf("Failed to create map with spin_lock\n");
	return fd;
}

static int create_sk_storage_map(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.map_flags = BPF_F_NO_PREALLOC,
		.btf_key_type_id = 1,
		.btf_value_type_id = 3,
	);
	int fd, btf_fd;

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;
	opts.btf_fd = btf_fd;
	fd = bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "test_map", 4, 8, 0, &opts);
	close(opts.btf_fd);
	if (fd < 0)
		printf("Failed to create sk_storage_map\n");
	return fd;
}

static int create_map_timer(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.btf_key_type_id = 1,
		.btf_value_type_id = 5,
	);
	int fd, btf_fd;

	btf_fd = load_btf();
	if (btf_fd < 0)
		return -1;

	opts.btf_fd = btf_fd;
	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 16, 1, &opts);
	if (fd < 0)
		printf("Failed to create map with timer\n");
	return fd;
}

/* ~16 MiB of verifier log buffer */
static char bpf_vlog[UINT_MAX >> 8];

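/* do_test_fixup() patches map fds and kfunc BTF ids into the test program.
 * Each fixup_* array holds the insn indices whose imm field receives the fd
 * of the corresponding freshly created map; a zero entry terminates the
 * list, so instruction 0 can never be a fixup target.
 */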
static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
			  struct bpf_insn *prog, int *map_fds)
{
	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
	int *fixup_map_hash_16b = test->fixup_map_hash_16b;
	int *fixup_map_array_48b = test->fixup_map_array_48b;
	int *fixup_map_sockmap = test->fixup_map_sockmap;
	int *fixup_map_sockhash = test->fixup_map_sockhash;
	int *fixup_map_xskmap = test->fixup_map_xskmap;
	int *fixup_map_stacktrace = test->fixup_map_stacktrace;
	int *fixup_prog1 = test->fixup_prog1;
	int *fixup_prog2 = test->fixup_prog2;
	int *fixup_map_in_map = test->fixup_map_in_map;
	int *fixup_cgroup_storage = test->fixup_cgroup_storage;
	int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
	int *fixup_map_spin_lock = test->fixup_map_spin_lock;
	int *fixup_map_array_ro = test->fixup_map_array_ro;
	int *fixup_map_array_wo = test->fixup_map_array_wo;
	int *fixup_map_array_small = test->fixup_map_array_small;
	int *fixup_sk_storage_map = test->fixup_sk_storage_map;
	int *fixup_map_event_output = test->fixup_map_event_output;
	int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
	int *fixup_map_ringbuf = test->fixup_map_ringbuf;
	int *fixup_map_timer = test->fixup_map_timer;
	struct kfunc_btf_id_pair *fixup_kfunc_btf_id = test->fixup_kfunc_btf_id;

	if (test->fill_helper) {
		test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
		test->fill_helper(test);
	}

	/* Allocating HTs with 1 elem is fine here, since we only exercise
	 * the verifier and don't do a runtime lookup, so the only thing
	 * that really matters is value size in this case.
	 */
	if (*fixup_map_hash_8b) {
		map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(long long), 1);
		do {
			prog[*fixup_map_hash_8b].imm = map_fds[0];
			fixup_map_hash_8b++;
		} while (*fixup_map_hash_8b);
	}

	if (*fixup_map_hash_48b) {
		map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(struct test_val), 1);
		do {
			prog[*fixup_map_hash_48b].imm = map_fds[1];
			fixup_map_hash_48b++;
		} while (*fixup_map_hash_48b);
	}

	if (*fixup_map_hash_16b) {
		map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(struct other_val), 1);
		do {
			prog[*fixup_map_hash_16b].imm = map_fds[2];
			fixup_map_hash_16b++;
		} while (*fixup_map_hash_16b);
	}

	if (*fixup_map_array_48b) {
		map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					sizeof(struct test_val), 1);
		update_map(map_fds[3], 0);
		do {
			prog[*fixup_map_array_48b].imm = map_fds[3];
			fixup_map_array_48b++;
		} while (*fixup_map_array_48b);
	}

	if (*fixup_prog1) {
		map_fds[4] = create_prog_array(prog_type, 4, 0, 1, 2);
		do {
			prog[*fixup_prog1].imm = map_fds[4];
			fixup_prog1++;
		} while (*fixup_prog1);
	}

	if (*fixup_prog2) {
		map_fds[5] = create_prog_array(prog_type, 8, 7, 1, 2);
		do {
			prog[*fixup_prog2].imm = map_fds[5];
			fixup_prog2++;
		} while (*fixup_prog2);
	}

	if (*fixup_map_in_map) {
		map_fds[6] = create_map_in_map();
		do {
			prog[*fixup_map_in_map].imm = map_fds[6];
			fixup_map_in_map++;
		} while (*fixup_map_in_map);
	}

	if (*fixup_cgroup_storage) {
		map_fds[7] = create_cgroup_storage(false);
		do {
			prog[*fixup_cgroup_storage].imm = map_fds[7];
			fixup_cgroup_storage++;
		} while (*fixup_cgroup_storage);
	}

	if (*fixup_percpu_cgroup_storage) {
		map_fds[8] = create_cgroup_storage(true);
		do {
			prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
			fixup_percpu_cgroup_storage++;
		} while (*fixup_percpu_cgroup_storage);
	}
	if (*fixup_map_sockmap) {
		map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
					sizeof(int), 1);
		do {
			prog[*fixup_map_sockmap].imm = map_fds[9];
			fixup_map_sockmap++;
		} while (*fixup_map_sockmap);
	}
	if (*fixup_map_sockhash) {
		map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
					 sizeof(int), 1);
		do {
			prog[*fixup_map_sockhash].imm = map_fds[10];
			fixup_map_sockhash++;
		} while (*fixup_map_sockhash);
	}
	if (*fixup_map_xskmap) {
		map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
					 sizeof(int), 1);
		do {
			prog[*fixup_map_xskmap].imm = map_fds[11];
			fixup_map_xskmap++;
		} while (*fixup_map_xskmap);
	}
	if (*fixup_map_stacktrace) {
		map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
					 sizeof(u64), 1);
		do {
			prog[*fixup_map_stacktrace].imm = map_fds[12];
			fixup_map_stacktrace++;
		} while (*fixup_map_stacktrace);
	}
	if (*fixup_map_spin_lock) {
		map_fds[13] = create_map_spin_lock();
		do {
			prog[*fixup_map_spin_lock].imm = map_fds[13];
			fixup_map_spin_lock++;
		} while (*fixup_map_spin_lock);
	}
	if (*fixup_map_array_ro) {
		map_fds[14] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   sizeof(struct test_val), 1,
					   BPF_F_RDONLY_PROG);
		update_map(map_fds[14], 0);
		do {
			prog[*fixup_map_array_ro].imm = map_fds[14];
			fixup_map_array_ro++;
		} while (*fixup_map_array_ro);
	}
	if (*fixup_map_array_wo) {
		map_fds[15] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   sizeof(struct test_val), 1,
					   BPF_F_WRONLY_PROG);
		update_map(map_fds[15], 0);
		do {
			prog[*fixup_map_array_wo].imm = map_fds[15];
			fixup_map_array_wo++;
		} while (*fixup_map_array_wo);
	}
	if (*fixup_map_array_small) {
		map_fds[16] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   1, 1, 0);
		update_map(map_fds[16], 0);
		do {
			prog[*fixup_map_array_small].imm = map_fds[16];
			fixup_map_array_small++;
		} while (*fixup_map_array_small);
	}
	if (*fixup_sk_storage_map) {
		map_fds[17] = create_sk_storage_map();
		do {
			prog[*fixup_sk_storage_map].imm = map_fds[17];
			fixup_sk_storage_map++;
		} while (*fixup_sk_storage_map);
	}
	if (*fixup_map_event_output) {
		map_fds[18] = __create_map(BPF_MAP_TYPE_PERF_EVENT_ARRAY,
					   sizeof(int), sizeof(int), 1, 0);
		do {
			prog[*fixup_map_event_output].imm = map_fds[18];
			fixup_map_event_output++;
		} while (*fixup_map_event_output);
	}
	if (*fixup_map_reuseport_array) {
		map_fds[19] = __create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
					   sizeof(u32), sizeof(u64), 1, 0);
		do {
			prog[*fixup_map_reuseport_array].imm = map_fds[19];
			fixup_map_reuseport_array++;
		} while (*fixup_map_reuseport_array);
	}
	if (*fixup_map_ringbuf) {
		map_fds[20] = create_map(BPF_MAP_TYPE_RINGBUF, 0,
					 0, 4096);
		do {
			prog[*fixup_map_ringbuf].imm = map_fds[20];
			fixup_map_ringbuf++;
		} while (*fixup_map_ringbuf);
	}
	if (*fixup_map_timer) {
		map_fds[21] = create_map_timer();
		do {
			prog[*fixup_map_timer].imm = map_fds[21];
			fixup_map_timer++;
		} while (*fixup_map_timer);
	}

	/* Patch in kfunc BTF IDs */
	if (fixup_kfunc_btf_id->kfunc) {
		struct btf *btf;
		int btf_id;

		do {
			btf_id = 0;
			btf = btf__load_vmlinux_btf();
			if (btf) {
				btf_id = btf__find_by_name_kind(btf,
								fixup_kfunc_btf_id->kfunc,
								BTF_KIND_FUNC);
				btf_id = btf_id < 0 ? 0 : btf_id;
			}
			btf__free(btf);
			prog[fixup_kfunc_btf_id->insn_idx].imm = btf_id;
			fixup_kfunc_btf_id++;
		} while (fixup_kfunc_btf_id->kfunc);
	}
}

struct libcap {
	struct __user_cap_header_struct hdr;
	struct __user_cap_data_struct data[2];
};

static int set_admin(bool admin)
{
	int err;

	if (admin) {
		err = cap_enable_effective(ADMIN_CAPS, NULL);
		if (err)
			perror("cap_enable_effective(ADMIN_CAPS)");
	} else {
		err = cap_disable_effective(ADMIN_CAPS, NULL);
		if (err)
			perror("cap_disable_effective(ADMIN_CAPS)");
	}

	return err;
}

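/* ENOTSUPP (524, defined at the top since libc doesn't provide it) is a
 * kernel-internal errno that some BPF operations return to userspace;
 * do_prog_test_run() below treats it as "skip", not as a failure.
 */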
static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
			    void *data, size_t size_data)
{
	__u8 tmp[TEST_DATA_LEN << 2];
	__u32 size_tmp = sizeof(tmp);
	int err, saved_errno;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = data,
		.data_size_in = size_data,
		.data_out = tmp,
		.data_size_out = size_tmp,
		.repeat = 1,
	);

	if (unpriv)
		set_admin(true);
	err = bpf_prog_test_run_opts(fd_prog, &topts);
	saved_errno = errno;

	if (unpriv)
		set_admin(false);

	if (err) {
		switch (saved_errno) {
		case ENOTSUPP:
			printf("Did not run the program (not supported) ");
			return 0;
		case EPERM:
			if (unpriv) {
				printf("Did not run the program (no permission) ");
				return 0;
			}
			/* fallthrough; */
		default:
			printf("FAIL: Unexpected bpf_prog_test_run error (%s) ",
			       strerror(saved_errno));
			return err;
		}
	}

	if (topts.retval != expected_val && expected_val != POINTER_VALUE) {
		printf("FAIL retval %d != %d ", topts.retval, expected_val);
		return 1;
	}

	return 0;
}

/* Returns true if every part of exp (tab-separated) appears in log, in order.
 *
 * If exp is an empty string, returns true.
 */
static bool cmp_str_seq(const char *log, const char *exp)
{
	char needle[200];
	const char *p, *q;
	int len;

	do {
		if (!strlen(exp))
			break;
		p = strchr(exp, '\t');
		if (!p)
			p = exp + strlen(exp);

		len = p - exp;
		if (len >= sizeof(needle) || !len) {
			printf("FAIL\nTestcase bug\n");
			return false;
		}
		strncpy(needle, exp, len);
		needle[len] = 0;
		q = strstr(log, needle);
		if (!q) {
			printf("FAIL\nUnexpected verifier log!\n"
			       "EXP: %s\nRES:\n", needle);
			return false;
		}
		log = q + len;
		exp = p + 1;
	} while (*p);
	return true;
}

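/* Example for cmp_str_seq() above: with exp = "R1 type=ctx\tR2 type=scalar"
 * the check succeeds only if the log contains "R1 type=ctx" and, somewhere
 * after it, "R2 type=scalar". The strings here are illustrative, not
 * actual verifier output.
 */
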
static void do_test_single(struct bpf_test *test, bool unpriv,
			   int *passes, int *errors)
{
	int fd_prog, expected_ret, alignment_prevented_execution;
	int prog_len, prog_type = test->prog_type;
	struct bpf_insn *prog = test->insns;
	LIBBPF_OPTS(bpf_prog_load_opts, opts);
	int run_errs, run_successes;
	int map_fds[MAX_NR_MAPS];
	const char *expected_err;
	int saved_errno;
	int fixup_skips;
	__u32 pflags;
	int i, err;

	for (i = 0; i < MAX_NR_MAPS; i++)
		map_fds[i] = -1;

	if (!prog_type)
		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	fixup_skips = skips;
	do_test_fixup(test, prog_type, prog, map_fds);
	if (test->fill_insns) {
		prog = test->fill_insns;
		prog_len = test->prog_len;
	} else {
		prog_len = probe_filter_length(prog);
	}
	/* If there were some map skips during fixup due to missing bpf
	 * features, skip this test.
	 */
	if (fixup_skips != skips)
		return;

	pflags = BPF_F_TEST_RND_HI32;
	if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
		pflags |= BPF_F_STRICT_ALIGNMENT;
	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
		pflags |= BPF_F_ANY_ALIGNMENT;
	/* any flag beyond the two test-local bits is a raw prog flag */
	if (test->flags & ~3)
		pflags |= test->flags;

	expected_ret = unpriv && test->result_unpriv != UNDEF ?
		       test->result_unpriv : test->result;
	expected_err = unpriv && test->errstr_unpriv ?
		       test->errstr_unpriv : test->errstr;

	opts.expected_attach_type = test->expected_attach_type;
	if (verbose)
		opts.log_level = 1;
	else if (expected_ret == VERBOSE_ACCEPT)
		opts.log_level = 2;
	else
		opts.log_level = 4;
	opts.prog_flags = pflags;

	if (prog_type == BPF_PROG_TYPE_TRACING && test->kfunc) {
		int attach_btf_id;

		attach_btf_id = libbpf_find_vmlinux_btf_id(test->kfunc,
						opts.expected_attach_type);
		if (attach_btf_id < 0) {
			printf("FAIL\nFailed to find BTF ID for '%s'!\n",
			       test->kfunc);
			(*errors)++;
			return;
		}

		opts.attach_btf_id = attach_btf_id;
	}

	opts.log_buf = bpf_vlog;
	opts.log_size = sizeof(bpf_vlog);
	fd_prog = bpf_prog_load(prog_type, NULL, "GPL", prog, prog_len, &opts);
	saved_errno = errno;

	/* BPF_PROG_TYPE_TRACING requires more setup and
	 * bpf_probe_prog_type won't give a correct answer.
	 */
	if (fd_prog < 0 && prog_type != BPF_PROG_TYPE_TRACING &&
	    !libbpf_probe_bpf_prog_type(prog_type, NULL)) {
		printf("SKIP (unsupported program type %d)\n", prog_type);
		skips++;
		goto close_fds;
	}

	if (fd_prog < 0 && saved_errno == ENOTSUPP) {
		printf("SKIP (program uses an unsupported feature)\n");
		skips++;
		goto close_fds;
	}

	alignment_prevented_execution = 0;

	if (expected_ret == ACCEPT || expected_ret == VERBOSE_ACCEPT) {
		if (fd_prog < 0) {
			printf("FAIL\nFailed to load prog '%s'!\n",
			       strerror(saved_errno));
			goto fail_log;
		}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		if (fd_prog >= 0 &&
		    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
			alignment_prevented_execution = 1;
#endif
		if (expected_ret == VERBOSE_ACCEPT && !cmp_str_seq(bpf_vlog, expected_err))
			goto fail_log;
	} else {
		if (fd_prog >= 0) {
			printf("FAIL\nUnexpected success to load!\n");
			goto fail_log;
		}
		if (!expected_err || !cmp_str_seq(bpf_vlog, expected_err)) {
			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
			       expected_err, bpf_vlog);
			goto fail_log;
		}
	}

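	/* The verifier's stats line at the end of the log reads like
	 * "processed 7 insns (limit 1000000) ..." (exact wording may vary by
	 * kernel version); atoi() below parses the count right after the
	 * 10-character "processed " prefix.
	 */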
	if (!unpriv && test->insn_processed) {
		uint32_t insn_processed;
		char *proc;

		proc = strstr(bpf_vlog, "processed ");
		insn_processed = atoi(proc + 10);
		if (test->insn_processed != insn_processed) {
			printf("FAIL\nUnexpected insn_processed %u vs %u\n",
			       insn_processed, test->insn_processed);
			goto fail_log;
		}
	}

	if (verbose)
		printf(", verifier log:\n%s", bpf_vlog);

	run_errs = 0;
	run_successes = 0;
	if (!alignment_prevented_execution && fd_prog >= 0 && test->runs >= 0) {
		uint32_t expected_val;
		int i;

		if (!test->runs)
			test->runs = 1;

		for (i = 0; i < test->runs; i++) {
			if (unpriv && test->retvals[i].retval_unpriv)
				expected_val = test->retvals[i].retval_unpriv;
			else
				expected_val = test->retvals[i].retval;

			err = do_prog_test_run(fd_prog, unpriv, expected_val,
					       test->retvals[i].data,
					       sizeof(test->retvals[i].data));
			if (err) {
				printf("(run %d/%d) ", i + 1, test->runs);
				run_errs++;
			} else {
				run_successes++;
			}
		}
	}

	if (!run_errs) {
		(*passes)++;
		if (run_successes > 1)
			printf("%d cases ", run_successes);
		printf("OK");
		if (alignment_prevented_execution)
			printf(" (NOTE: not executed due to unknown alignment)");
		printf("\n");
	} else {
		printf("\n");
		goto fail_log;
	}
close_fds:
	if (test->fill_insns)
		free(test->fill_insns);
	close(fd_prog);
	for (i = 0; i < MAX_NR_MAPS; i++)
		close(map_fds[i]);
	sched_yield();
	return;
fail_log:
	(*errors)++;
	printf("%s", bpf_vlog);
	goto close_fds;
}

static bool is_admin(void)
{
	__u64 caps;

	/* The tests check for the finer-grained capabilities CAP_NET_ADMIN,
	 * CAP_PERFMON and CAP_BPF rather than CAP_SYS_ADMIN.
	 * Thus, disable CAP_SYS_ADMIN at the beginning.
	 */
	if (cap_disable_effective(1ULL << CAP_SYS_ADMIN, &caps)) {
		perror("cap_disable_effective(CAP_SYS_ADMIN)");
		return false;
	}

	return (caps & ADMIN_CAPS) == ADMIN_CAPS;
}

static void get_unpriv_disabled(void)
{
	char buf[2];
	FILE *fd;

	fd = fopen("/proc/sys/" UNPRIV_SYSCTL, "r");
	if (!fd) {
		perror("fopen /proc/sys/" UNPRIV_SYSCTL);
		unpriv_disabled = true;
		return;
	}
	if (fgets(buf, 2, fd) == buf && atoi(buf))
		unpriv_disabled = true;
	fclose(fd);
}

static bool test_as_unpriv(struct bpf_test *test)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Some architectures have strict alignment requirements. In
	 * that case, the BPF verifier detects if a program has
	 * unaligned accesses and rejects them. A user can pass
	 * BPF_F_ANY_ALIGNMENT to a program to override this
	 * check. That, however, will only work when a privileged user
	 * loads a program. An unprivileged user loading a program
	 * with this flag will be rejected prior to entering the
	 * verifier.
	 */
	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
		return false;
#endif
	return !test->prog_type ||
	       test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
	       test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
}

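/* Every test runs twice where possible: once unprivileged ("#N/u" lines)
 * and once privileged ("#N/p" lines); each mode can be skipped
 * independently, and skips are counted in the summary.
 */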
static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
	int i, passes = 0, errors = 0;

	for (i = from; i < to; i++) {
		struct bpf_test *test = &tests[i];

		/* Program types that are not supported under non-root get
		 * skipped right away.
		 */
		if (test_as_unpriv(test) && unpriv_disabled) {
			printf("#%d/u %s SKIP\n", i, test->descr);
			skips++;
		} else if (test_as_unpriv(test)) {
			if (!unpriv)
				set_admin(false);
			printf("#%d/u %s ", i, test->descr);
			do_test_single(test, true, &passes, &errors);
			if (!unpriv)
				set_admin(true);
		}

		if (unpriv) {
			printf("#%d/p %s SKIP\n", i, test->descr);
			skips++;
		} else {
			printf("#%d/p %s ", i, test->descr);
			do_test_single(test, false, &passes, &errors);
		}
	}

	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
	       skips, errors);
	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}

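/* Usage: test_verifier [-v] [<test_from> [<test_to>]]
 *
 * With one number only that test runs; with two numbers the inclusive
 * range runs; with no numbers all tests run. -v additionally dumps the
 * verifier log even for accepted programs.
 */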
int main(int argc, char **argv)
{
	unsigned int from = 0, to = ARRAY_SIZE(tests);
	bool unpriv = !is_admin();
	int arg = 1;

	if (argc > 1 && strcmp(argv[1], "-v") == 0) {
		arg++;
		verbose = true;
		argc--;
	}

	if (argc == 3) {
		unsigned int l = atoi(argv[arg]);
		unsigned int u = atoi(argv[arg + 1]);

		if (l < to && u < to) {
			from = l;
			to = u + 1;
		}
	} else if (argc == 2) {
		unsigned int t = atoi(argv[arg]);

		if (t < to) {
			from = t;
			to = t + 1;
		}
	}

	get_unpriv_disabled();
	if (unpriv && unpriv_disabled) {
		printf("Cannot run as unprivileged user with sysctl %s.\n",
		       UNPRIV_SYSCTL);
		return EXIT_FAILURE;
	}

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	bpf_semi_rand_init();
	return do_test(unpriv, from, to);
}